repo_name (string, 6-92 chars) | path (string, 4-191 chars) | copies (322 classes) | size (string, 4-6 chars) | content (string, 821-753k chars) | license (15 classes)
---|---|---|---|---|---|
bathepawan/workload-automation | wlauto/utils/fps.py | 1 | 7686 | # Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
try:
import pandas as pd
except ImportError:
pd = None
SurfaceFlingerFrame = collections.namedtuple('SurfaceFlingerFrame', 'desired_present_time actual_present_time frame_ready_time')
GfxInfoFrame = collections.namedtuple('GfxInfoFrame', 'Flags IntendedVsync Vsync OldestInputEvent NewestInputEvent HandleInputStart AnimationStart PerformTraversalsStart DrawStart SyncQueued SyncStart IssueDrawCommandsStart SwapBuffers FrameCompleted')
# https://android.googlesource.com/platform/frameworks/base/+/marshmallow-release/libs/hwui/JankTracker.cpp
# Frames that are exempt from jank metrics.
# First-draw frames, for example, are expected to be slow; this is hidden
# from the user with window animations and other tricks.
# Similarly, we don't track direct-drawing via Surface:lockHardwareCanvas() for now
# Android M: WindowLayoutChanged | SurfaceCanvas
GFXINFO_EXEMPT = 1 | 4
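# Illustrative helper (an addition for clarity, not part of the upstream
# module): a gfxinfo frame could be tested against the exempt mask like this.
def _is_jank_exempt(frame):
    """Return True if a GfxInfoFrame carries any of the jank-exempt flags."""
    return bool(frame.Flags & GFXINFO_EXEMPT)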
class FpsProcessor(object):
"""
Provides a common object for processing SurfaceFlinger output into frame
statistics.
This processor returns the four frame statistics below:
:FPS: Frames Per Second. This is the frame rate of the workload.
:frame_count: The total number of frames rendered during the execution of
the workload.
:janks: The number of "janks" that occurred during execution of the
workload. Janks are sudden shifts in frame rate. They result
in a "stuttery" UI. See http://jankfree.org/jank-busters-io
:not_at_vsync: The number of frames that did not render in a single
vsync cycle.
"""
def __init__(self, data, action=None, extra_data=None):
"""
data - a pandas.DataFrame object with frame data (e.g. frames.csv)
action - if set, metric names are output with this additional action specifier
extra_data - optional path to extra data (e.g. a gfxinfo summary csv) used,
when provided, in place of the computed statistics
"""
self.data = data
self.action = action
self.extra_data = extra_data
def process(self, refresh_period, drop_threshold): # pylint: disable=too-many-locals
"""
Generate frames per second (fps) and associated metrics for the workload.
refresh_period - the vsync interval
drop_threshold - data points below this fps will be dropped
"""
fps = float('nan')
frame_count, janks, not_at_vsync = 0, 0, 0
vsync_interval = refresh_period
per_frame_fps = pd.Series()
# SurfaceFlinger Algorithm
if self.data.columns.tolist() == list(SurfaceFlingerFrame._fields):
# filter out bogus frames.
bogus_frames_filter = self.data.actual_present_time != 0x7fffffffffffffff
actual_present_times = self.data.actual_present_time[bogus_frames_filter]
actual_present_time_deltas = actual_present_times.diff().dropna()
vsyncs_to_compose = actual_present_time_deltas.div(vsync_interval)
vsyncs_to_compose.apply(lambda x: int(round(x, 0)))
# drop values lower than drop_threshold FPS as real in-game frame
# rate is unlikely to drop below that (except on loading screens
# etc, which should not be factored into the frame rate calculation).
per_frame_fps = (1.0 / (vsyncs_to_compose.multiply(vsync_interval / 1e9)))
keep_filter = per_frame_fps > drop_threshold
filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter]
per_frame_fps.name = 'fps'
if not filtered_vsyncs_to_compose.empty:
total_vsyncs = filtered_vsyncs_to_compose.sum()
frame_count = filtered_vsyncs_to_compose.size
if total_vsyncs:
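                    # each composed frame spans vsyncs_to_compose * vsync_interval
                    # nanoseconds, so fps = frame_count / total time in seconds
                    # (the 1e9 factor converts nanoseconds to seconds)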
fps = 1e9 * frame_count / (vsync_interval * total_vsyncs)
janks = self._calc_janks(filtered_vsyncs_to_compose)
not_at_vsync = self._calc_not_at_vsync(vsyncs_to_compose)
# GfxInfo Algorithm
elif self.data.columns.tolist() == list(GfxInfoFrame._fields):
frame_time = self.data.FrameCompleted - self.data.IntendedVsync
per_frame_fps = (1e9 / frame_time)
keep_filter = per_frame_fps > drop_threshold
per_frame_fps = per_frame_fps[keep_filter]
per_frame_fps.name = 'fps'
frame_count = self.data.index.size
if frame_count:
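                # a frame counts as a jank if it took at least one full vsync
                # interval from its intended vsync to completion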
janks = frame_time[frame_time >= vsync_interval].count()
not_at_vsync = self.data.IntendedVsync - self.data.Vsync
not_at_vsync = not_at_vsync[not_at_vsync != 0].count()
duration = self.data.Vsync.iloc[-1] - self.data.Vsync.iloc[0]
fps = (1e9 * frame_count) / float(duration)
# If gfxinfocsv is provided, get stats from that instead
if self.extra_data:
series = pd.read_csv(self.extra_data, header=None, index_col=0, squeeze=True)
if not series.empty: # pylint: disable=maybe-no-member
frame_count = series['Total frames rendered']
janks = series['Janky frames']
not_at_vsync = series['Number Missed Vsync']
metrics = (fps, frame_count, janks, not_at_vsync)
return per_frame_fps, metrics
def percentiles(self):
# SurfaceFlinger Algorithm
if self.data.columns.tolist() == list(SurfaceFlingerFrame._fields):
frame_time = self.data.frame_ready_time.diff()
# GfxInfo Algorithm
elif self.data.columns.tolist() == list(GfxInfoFrame._fields):
frame_time = self.data.FrameCompleted - self.data.IntendedVsync
data = frame_time.dropna().quantile([0.90, 0.95, 0.99])
# Convert to ms, round to nearest, cast to int
data = data.div(1e6).round()
try:
data = data.astype('int')
except ValueError:
pass
# If gfxinfocsv is provided, get stats from that instead
if self.extra_data:
series = pd.read_csv(self.extra_data, header=None, index_col=0, squeeze=True)
if not series.empty: # pylint: disable=maybe-no-member
data = series[series.index.str.contains('th percentile')] # pylint: disable=maybe-no-member
return list(data.get_values())
@staticmethod
def _calc_janks(filtered_vsyncs_to_compose):
"""
Internal method for calculating jank frames.
"""
pause_latency = 20
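        # deltas above 1.5 vsyncs count as janks; deltas at or above
        # pause_latency are assumed to be deliberate pauses (e.g. loading)
        # rather than janks, so they are excluded below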
vtc_deltas = filtered_vsyncs_to_compose.diff().dropna()
vtc_deltas = vtc_deltas.abs()
janks = vtc_deltas.apply(lambda x: (pause_latency > x > 1.5) and 1 or 0).sum()
return janks
@staticmethod
def _calc_not_at_vsync(vsyncs_to_compose):
"""
Internal method for calculating the number of frames that did not
render in a single vsync cycle.
"""
epsilon = 0.0001
func = lambda x: (abs(x - 1.0) > epsilon) and 1 or 0
not_at_vsync = vsyncs_to_compose.apply(func).sum()
return not_at_vsync
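# Minimal usage sketch (illustrative only): the csv path, the 60 Hz refresh
# period in nanoseconds and the drop threshold below are assumptions, not
# defaults of this module, and the DataFrame columns must match the
# SurfaceFlingerFrame or GfxInfoFrame fields above.
if __name__ == '__main__' and pd is not None:
    example_frames = pd.read_csv('frames.csv')  # hypothetical frame dump
    example_processor = FpsProcessor(example_frames)
    per_frame_fps, (fps, frame_count, janks, not_at_vsync) = \
        example_processor.process(refresh_period=16666667, drop_threshold=5)
    print('FPS: %s, frames: %s, janks: %s, not at vsync: %s'
          % (fps, frame_count, janks, not_at_vsync))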
| apache-2.0 |
burnash/NeuroVault | neurovault/settings.py | 2 | 11986 | # Django settings for neurovault project.
import os
import sys
import tempfile
from datetime import timedelta
import matplotlib
from kombu import Exchange, Queue
matplotlib.use('Agg')
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = True
DOMAIN_NAME = "http://neurovault.org"
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Chris', '[email protected]'),
)
MANAGERS = ADMINS
SITE_ID = 1
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'postgres',
# The following settings are not used with sqlite3:
'USER': 'postgres',
'HOST': 'db', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '5432', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/public/media/'
PRIVATE_MEDIA_ROOT = '/var/www/image_data'
PRIVATE_MEDIA_URL = '/media/images'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/var/www/static'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'neurovault.apps.statmaps.middleware.CollectionRedirectMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'neurovault.urls'
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'neurovault.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (),
'OPTIONS': {'context_processors': ("django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
'django.core.context_processors.request'),
'loaders': ('hamlpy.template.loaders.HamlPyFilesystemLoader',
'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)}
}
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'neurovault.apps.main',
'neurovault.apps.statmaps',
'neurovault.apps.users',
'django.contrib.sitemaps',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
#'django.contrib.admindocs',
'social.apps.django_app.default',
'rest_framework',
'taggit',
'crispy_forms',
'coffeescript',
'taggit_templatetags',
#'south',
'dbbackup',
'polymorphic',
'djcelery',
'django_cleanup',
'file_resubmit',
'django_mailgun',
'django_hstore',
'guardian',
'oauth2_provider',
'fixture_media'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'filters': {
# 'require_debug_false': {
# '()': 'django.utils.log.RequireDebugFalse'
# }
# },
# 'handlers': {
# 'mail_admins': {
# 'level': 'ERROR',
# 'filters': ['require_debug_false'],
# 'class': 'django.utils.log.AdminEmailHandler'
# }
# },
# 'loggers': {
# 'django.request': {
# 'handlers': ['mail_admins'],
# 'level': 'ERROR',
# 'propagate': True,
# },
# }
# }
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'guardian.backends.ObjectPermissionBackend',
)
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.social_auth.associate_by_email', # <--- enable this one
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
)
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.HyperlinkedModelSerializer',
# LimitOffsetPagination will allow to set a ?limit= and ?offset=
# variable in the URL.
'DEFAULT_PAGINATION_CLASS':
'neurovault.api.pagination.StandardResultPagination',
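    # e.g. an illustrative request: GET /api/images/?limit=100&offset=200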
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.ext.rest_framework.OAuth2Authentication',
),
'DEFAULT_RENDERER_CLASSES': (
'neurovault.api.utils.ExplicitUnicodeJSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'UNICODE_JSON': True,
}
OAUTH2_PROVIDER = {
'REQUEST_APPROVAL_PROMPT': 'auto'
}
LOGIN_REDIRECT_URL = '/my_collections/'
#LOGIN_URL = '/login-form/'
#LOGIN_ERROR_URL = '/login-error/'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
DBBACKUP_STORAGE = 'dbbackup.storage.dropbox_storage'
DBBACKUP_TOKENS_FILEPATH = '/home/filo/dbtokens'
DBBACKUP_POSTGRES_BACKUP_COMMAND = 'export PGPASSWORD=neurovault\n pg_dump --username={adminuser} --host={host} --port={port} {databasename} >'
# For Apache, use 'sendfile.backends.xsendfile'
# For Nginx, use 'sendfile.backends.nginx'
# For Devserver, use 'sendfile.backends.development'
SENDFILE_BACKEND = 'sendfile.backends.development'
PRIVATE_MEDIA_REDIRECT_HEADER = 'X-Accel-Redirect'
PYCORTEX_DATASTORE = os.path.join(BASE_DIR,'pycortex_data')
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
"file_resubmit": {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
"LOCATION": '/tmp/file_resubmit/'
}
}
# Mailgun email config
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = 'key-3ax6xnjp29jd6fds4gc373sgvjxteol0' # replace with a real key in production
MAILGUN_SERVER_NAME = 'samples.mailgun.org'# replace with 'neurovault.org' in production
DEFAULT_FROM_EMAIL = "[email protected]"
if os.path.exists('/usr/local/share/pycortex/db/fsaverage'):
STATICFILES_DIRS = (
('pycortex-resources', '/usr/local/lib/python2.7/site-packages/cortex/webgl/resources'),
('pycortex-ctmcache', '/usr/local/share/pycortex/db/fsaverage/cache')
)
# Celery config
BROKER_URL = 'redis://redis:6379/0'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
Queue('default', Exchange('default'), routing_key='default'),
)
CELERY_IMPORTS = ('neurovault.apps.statmaps.tasks', )
CELERYBEAT_SCHEDULE = {
'anima_crawl_every day': {
'task': 'crawl_anima',
'schedule': timedelta(days=1)
},
}
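# The 'crawl_anima' entry refers to a Celery task registered under that name
# in one of the CELERY_IMPORTS modules; schematically (an illustrative sketch,
# not the actual task definition):
#     @shared_task(name='crawl_anima')
#     def crawl_anima():
#         ...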
CELERY_TIMEZONE = 'Europe/Berlin'
ANONYMOUS_USER_ID = -1
DEFAULT_OAUTH_APPLICATION_ID = -1
DEFAULT_OAUTH_APP_NAME = 'DefaultOAuthApp'
DEFAULT_OAUTH_APP_OWNER_ID = -2
DEFAULT_OAUTH_APP_OWNER_USERNAME = 'DefaultAppOwner'
OAUTH_PERSONAL_TOKEN_LENGTH = 40
# Secret keys: import the real secrets when available, otherwise fall back to bogus ones.
try:
from secrets import *
except ImportError:
from bogus_secrets import *
try:
from local_settings import *
except ImportError:
pass
# freesurfer/pycortex environment
os.environ["XDG_CONFIG_HOME"] = PYCORTEX_DATASTORE
os.environ["FREESURFER_HOME"] = "/opt/freesurfer"
os.environ["SUBJECTS_DIR"] = os.path.join(os.environ["FREESURFER_HOME"],"subjects")
os.environ["FSLOUTPUTTYPE"] = "NIFTI_GZ"
# provToolbox path
os.environ["PATH"] += os.pathsep + '/path/to/lib/provToolbox/bin'
#CELERYBEAT_SCHEDULE = {
# 'run_make_correlation_df': {
# 'task': 'neurovault.apps.statmaps.tasks...',
# 'schedule': timedelta(minutes=30),
# },
#}
# or manage periodic schedule in django admin
#CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
if "test" in sys.argv or "benchmark" in sys.argv:
test_media_root = os.path.join(tempfile.mkdtemp(prefix="neurovault_test_"))
PRIVATE_MEDIA_ROOT = test_media_root
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
TAGGIT_CASE_INSENSITIVE=True
FIXTURE_DIRS = (
'apps/statmaps/fixtures/',
)
MEDIA_ROOT = PRIVATE_MEDIA_ROOT
| mit |
jtrussell/think-bayes-workspace | src/vendor/AllenDowney/paintball.py | 2 | 5431 | """This file contains code used in "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import thinkbayes
import matplotlib.pyplot as pyplot
import thinkplot
import math
import sys
FORMATS = ['pdf', 'eps', 'png']
def StrafingSpeed(alpha, beta, x):
"""Computes strafing speed, given location of shooter and impact.
alpha: x location of shooter
beta: y location of shooter
x: location of impact
Returns: derivative of x with respect to theta
"""
theta = math.atan2(x - alpha, beta)
speed = beta / math.cos(theta)**2
return speed
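# Derivation note (added for clarity): with the shooter at (alpha, beta),
# beta being the perpendicular distance to the wall, the impact location is
# x = alpha + beta * tan(theta), so dx/dtheta = beta / cos(theta)**2,
# which is the speed returned above.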
def MakeLocationPmf(alpha, beta, locations):
"""Computes the Pmf of the locations, given alpha and beta.
Given that the shooter is at coordinates (alpha, beta),
the probability of hitting any spot is inversely proportional
to the strafe speed.
alpha: x position
beta: y position
locations: x locations where the pmf is evaluated
Returns: Pmf object
"""
pmf = thinkbayes.Pmf()
for x in locations:
prob = 1.0 / StrafingSpeed(alpha, beta, x)
pmf.Set(x, prob)
pmf.Normalize()
return pmf
class Paintball(thinkbayes.Suite, thinkbayes.Joint):
"""Represents hypotheses about the location of an opponent."""
def __init__(self, alphas, betas, locations):
"""Makes a joint suite of parameters alpha and beta.
Enumerates all pairs of alpha and beta.
Stores locations for use in Likelihood.
alphas: possible values for alpha
betas: possible values for beta
locations: possible locations along the wall
"""
self.locations = locations
pairs = [(alpha, beta)
for alpha in alphas
for beta in betas]
thinkbayes.Suite.__init__(self, pairs)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: pair of alpha, beta
data: location of a hit
Returns: float likelihood
"""
alpha, beta = hypo
x = data
pmf = MakeLocationPmf(alpha, beta, self.locations)
like = pmf.Prob(x)
return like
def MakePmfPlot(alpha = 10):
"""Plots Pmf of location for a range of betas."""
locations = range(0, 31)
betas = [10, 20, 40]
thinkplot.PrePlot(num=len(betas))
for beta in betas:
pmf = MakeLocationPmf(alpha, beta, locations)
pmf.name = 'beta = %d' % beta
thinkplot.Pmf(pmf)
thinkplot.Save('paintball1',
xlabel='Distance',
ylabel='Prob',
formats=FORMATS)
def MakePosteriorPlot(suite):
"""Plots the posterior marginal distributions for alpha and beta.
suite: posterior joint distribution of location
"""
marginal_alpha = suite.Marginal(0)
marginal_alpha.name = 'alpha'
marginal_beta = suite.Marginal(1)
marginal_beta.name = 'beta'
print 'alpha CI', marginal_alpha.CredibleInterval(50)
print 'beta CI', marginal_beta.CredibleInterval(50)
thinkplot.PrePlot(num=2)
#thinkplot.Pmf(marginal_alpha)
#thinkplot.Pmf(marginal_beta)
thinkplot.Cdf(thinkbayes.MakeCdfFromPmf(marginal_alpha))
thinkplot.Cdf(thinkbayes.MakeCdfFromPmf(marginal_beta))
thinkplot.Save('paintball2',
xlabel='Distance',
ylabel='Prob',
loc=4,
formats=FORMATS)
def MakeConditionalPlot(suite):
"""Plots marginal CDFs for alpha conditioned on beta.
suite: posterior joint distribution of location
"""
betas = [10, 20, 40]
thinkplot.PrePlot(num=len(betas))
for beta in betas:
cond = suite.Conditional(0, 1, beta)
cond.name = 'beta = %d' % beta
thinkplot.Pmf(cond)
thinkplot.Save('paintball3',
xlabel='Distance',
ylabel='Prob',
formats=FORMATS)
def MakeContourPlot(suite):
"""Plots the posterior joint distribution as a contour plot.
suite: posterior joint distribution of location
"""
thinkplot.Contour(suite.GetDict(), contour=False, pcolor=True)
thinkplot.Save('paintball4',
xlabel='alpha',
ylabel='beta',
axis=[0, 30, 0, 20],
formats=FORMATS)
def MakeCrediblePlot(suite):
"""Makes a plot showing several two-dimensional credible intervals.
suite: Suite
"""
d = dict((pair, 0) for pair in suite.Values())
percentages = [75, 50, 25]
for p in percentages:
interval = suite.MaxLikeInterval(p)
for pair in interval:
d[pair] += 1
thinkplot.Contour(d, contour=False, pcolor=True)
pyplot.text(17, 4, '25', color='white')
pyplot.text(17, 15, '50', color='white')
pyplot.text(17, 30, '75')
thinkplot.Save('paintball5',
xlabel='alpha',
ylabel='beta',
formats=FORMATS)
def main(script):
alphas = range(0, 31)
betas = range(1, 51)
locations = range(0, 31)
suite = Paintball(alphas, betas, locations)
suite.UpdateSet([15, 16, 18, 21])
MakeCrediblePlot(suite)
MakeContourPlot(suite)
MakePosteriorPlot(suite)
MakeConditionalPlot(suite)
MakePmfPlot()
if __name__ == '__main__':
main(*sys.argv)
| mit |
roxyboy/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 105 | 26588 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use a pre-defined fold, as the generated folds would differ for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test that liblinear fails when a class_weight dict is provided
# for a multiclass problem. However, it can handle
# binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
| bsd-3-clause |
jaidevd/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 78 | 6016 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
from sklearn.ensemble.gradient_boosting import QuantileLossFunction
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
# check the negative gradient against the alternative definition
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float64)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
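    # the huge outlier at the end carries zero weight, so the weighted median
    # of the remaining values (50 zeros, a single 1, 50 twos) is 1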
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float64)
y.fill(0.0)
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float64)
y.fill(1.0)
sw = np.ones(102, dtype=np.float64)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_quantile_loss_function():
# Non regression test for the QuantileLossFunction object
# There was a sign problem when evaluating the function
# for negative values of 'ytrue - ypred'
x = np.asarray([-1.0, 0.0, 1.0])
y_found = QuantileLossFunction(1, 0.9)(x, np.zeros_like(x))
y_expected = np.asarray([0.1, 0.0, 0.9]).mean()
np.testing.assert_allclose(y_found, y_expected)
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
mbrucher/AudioTK | tests/Python/Delay/PyATKDelay_chorus_test.py | 1 | 2560 | #!/usr/bin/env python
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
from ATK.Delay import DoubleUniversalVariableDelayLineFilter
from ATK.EQ import DoubleSecondOrderLowPassFilter
sample_rate = 96000
def filter(noise, input, blend=0, feedback=0, feedforward=1):
import numpy as np
output = np.zeros(input.shape, dtype=np.float64)
infilter = DoubleInPointerFilter(input, False)
infilter.input_sampling_rate = sample_rate
noisefilter = DoubleInPointerFilter(noise, False)
noisefilter.input_sampling_rate = sample_rate
lownoisefilter = DoubleSecondOrderLowPassFilter()
lownoisefilter.input_sampling_rate = sample_rate
lownoisefilter.cut_frequency = 5
lownoisefilter.set_input_port(0, noisefilter, 0)
delayfilter = DoubleUniversalVariableDelayLineFilter(5000)
delayfilter.input_sampling_rate = sample_rate
delayfilter.set_input_port(0, infilter, 0)
delayfilter.set_input_port(1, lownoisefilter, 0)
delayfilter.blend = blend
delayfilter.feedback = feedback
delayfilter.feedforward = feedforward
outfilter = DoubleOutPointerFilter(output, False)
outfilter.input_sampling_rate = sample_rate
outfilter.set_input_port(0, delayfilter, 0)
outfilter.process(input.shape[1])
return output
def chorus_test():
import numpy as np
from numpy.testing import assert_almost_equal
import os
dirname = os.path.dirname(__file__)
x = np.fromfile(dirname + os.sep + "input_chorus.dat", dtype=np.float64).reshape(1, -1)
noise = np.fromfile(dirname + os.sep + "noise_chorus.dat", dtype=np.float64).reshape(1, -1)
ref = np.fromfile(dirname + os.sep + "output_chorus.dat", dtype=np.float64).reshape(1, -1)
out = filter(noise, x, feedforward=1, blend=0.7, feedback=-0.7)
assert_almost_equal(out, ref)
if __name__ == "__main__":
import numpy as np
import matplotlib.pyplot as plt
samples = 2000000
freq_max = 20000
import sys, os
sys.path.append(os.path.dirname(os.path.realpath(__file__))+"/..")
from display.compare_spec import plot_me
t = np.arange(samples, dtype=np.float64).reshape(1, -1) / sample_rate
d = np.sin(np.pi * (sample_rate * freq_max / samples * (t + .1)) * t)
d[:,:1000].tofile("input_chorus.dat")
noise = 20e-3 * sample_rate + 5e-3 * sample_rate * np.random.randn(1, samples)
noise[:,:1000].tofile("noise_chorus.dat")
out = filter(noise, d, feedforward=1, blend=0.7, feedback=-0.7)
out[:,:1000].tofile("output_chorus.dat")
plt.figure()
plot_me((d[0], out[0]), sample_rate)
plt.gcf().suptitle("Delay")
plt.legend()
plt.show()
| bsd-3-clause |
adammenges/statsmodels | statsmodels/examples/ex_misc_tarma.py | 34 | 1875 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 03 23:01:44 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
from statsmodels.tsa.arima_process import arma_generate_sample, ArmaProcess
from statsmodels.miscmodels.tmodel import TArma
from statsmodels.tsa.arima_model import ARMA
nobs = 500
ar = [1, -0.6, -0.1]
ma = [1, 0.7]
dist = lambda n: np.random.standard_t(3, size=n)
np.random.seed(8659567)
x = arma_generate_sample(ar, ma, nobs, sigma=1, distrvs=dist,
burnin=500)
mod = TArma(x)
order = (2, 1)
res = mod.fit(order=order)
res2 = mod.fit_mle(order=order, start_params=np.r_[res[0], 5, 1], method='nm')
print(res[0])
proc = ArmaProcess.from_coeffs(res[0][:order[0]], res[0][order[0]:])
print(ar, ma)
proc.nobs = nobs
# TODO: bug nobs is None, not needed ?, used in ArmaProcess.__repr__
print(proc.ar, proc.ma)
print(proc.ar_roots(), proc.ma_roots())
from statsmodels.tsa.arma_mle import Arma
modn = Arma(x)
resn = modn.fit_mle(order=order)
moda = ARMA(x, order=order)
resa = moda.fit( trend='nc')
print('\nparameter estimates')
print('ls ', res[0])
print('norm', resn.params)
print('t ', res2.params)
print('A ', resa.params)
print('\nstandard deviation of parameter estimates')
#print 'ls ', res[0] #TODO: not available yet
print('norm', resn.bse)
print('t ', res2.bse)
print('A ', resa.bse)
print('A/t-1', resa.bse / res2.bse[:3] - 1)
print('other bse')
print(resn.bsejac)
print(resn.bsejhj)
print(res2.bsejac)
print(res2.bsejhj)
print(res2.t_test(np.eye(len(res2.params))))
# TArma has no fittedvalues and resid
# TODO: check if lag is correct or if fitted `x-resid` is shifted
resid = res2.model.geterrors(res2.params)
fv = res[2]['fvec'] #resid returned from leastsq?
import matplotlib.pyplot as plt
plt.plot(x, 'o', alpha=0.5)
plt.plot(x-resid)
plt.plot(x-fv)
#plt.show()
| bsd-3-clause |
lbishal/scikit-learn | sklearn/neighbors/base.py | 30 | 30586 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
prajjwal1/prajjwal1.github.io | _site/markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](https://academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either the notebook or this script from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, url_slug, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
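#
# For illustration only (this sample is not from the original data), a minimal
# `publications.tsv` could look like the following, with the fields separated
# by tabs (shown here as ` | ` for readability):
#
#     pub_date | title | venue | excerpt | citation | url_slug | paper_url
#     2015-10-01 | Sample Paper | Journal 1 | One-line summary. | Your Name. (2015). "Sample Paper." Journal 1. | sample-paper | http://example.com/sample.pdf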
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), just remove or comment out the corresponding lines below.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| mit |
facom/cometsyn | tmp/comet-snapshot.py | 1 | 3009 | """
COMET DESINTEGRATION
SNAPSHOT
"""
from matplotlib import pyplot as plt,patches as pat,cm
from mpl_toolkits.mplot3d import Axes3D
from numpy import *
from sys import *
from os import system
#############################################################
#CONSTANTS
#############################################################
NSTATE=7
NORM=linalg.norm
#############################################################
#OBSERVATIONAL DATA
#############################################################
orbdata=loadtxt("comet-orbit.dat");
config={}
execfile("fragments.gph",config);
norb=orbdata.shape[0]
nfrag=config['nfrag']
nlarge=config['nlarge']
ndebris=config['ndebris']
tini=config['tini']
print "Orbital properties:"
print "\tNumber of points:%d"%norb
print "\tNumber of fragments:%d"%nfrag
print "\tLarge fragments:%d"%nlarge
print "\tDebris:%d"%ndebris
ts=orbdata[:,0]
xcm=orbdata[:,1:7]
xs=[]
for i in xrange(0,nfrag):
k=NSTATE*(i+1)+1
xs+=[orbdata[:,k:k+NSTATE]]
xs=array(xs)
#############################################################
#SELECT SNAPSHOT
#############################################################
try:
iobs=int(argv[1])
except:
iobs=0
if iobs>=norb:
print "Maximum snapshot %d (t = %e)"%(norb,ts[norb-1]+tini)
exit(1)
t=ts[iobs]
print "Snaptshot: %d, t - tper = %.2f yrs"%(iobs,t+tini)
print "Integration time: %.2f yrs = %.2f days"%(t,t*365.25)
rcm=xcm[iobs,0:NSTATE]
d=NORM(rcm[0:3])
D=d
f=open("comet-fragments-snapshots.dat","w")
rmax=0
vmax=0
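# for each fragment, write its state relative to the centre of mass and
# track the largest separation distance and velocity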
for i in xrange(0,nfrag):
rs=xs[i,iobs,0:NSTATE]
rf=rs-rcm
rfnorm=NORM(rf[0:3])
vnorm=NORM(rf[3:6])
rmax=max(rmax,rfnorm)
vmax=max(vmax,vnorm)
type=1.0
if i>=nlarge:type=2.0
f.write("%e %-+23.17e %-+23.17e %-+23.17e %-+23.17e %-+23.17e %-+23.17e\n"%(type,
rf[0],
rf[1],
rf[2],
rf[3],
rf[4],
rf[5]))
f.close()
rmax*=config['UL']
vmax*=config['UL']/config['UT']
print "Maximum distance: %e km"%(rmax/1E3)
print "Maximum separation velocity: %e km/s"%(vmax/1E3)
#############################################################
#SAVE fragments.gph
#############################################################
f=open("fragments.gph","w")
f.write("""\
file='comet-fragments-snapshots.dat'
title='t = %.2f yrs, d = %.2f AU, D = %.2f AU'
Rmax=%e
Rc=%e
rf=%e
"""%(t+tini,d,D,
rmax,config['Rc']*10,config['rf']))
f.close()
#############################################################
#PLOT
#############################################################
system("gnuplot plot-fragments.gpl")
| gpl-2.0 |
IssamLaradji/scikit-learn | sklearn/manifold/isomap.py | 36 | 7119 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
        framework for nonlinear dimensionality reduction. Science 290 (5500)
        (2000): 2319-2323.
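    Examples
    --------
    A minimal usage sketch with random input data (for illustration only):
    >>> import numpy as np
    >>> from sklearn.manifold import Isomap
    >>> X = np.random.RandomState(0).rand(20, 5)
    >>> embedding = Isomap(n_neighbors=5, n_components=2)
    >>> X_transformed = embedding.fit_transform(X)
    >>> X_transformed.shape
    (20, 2)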
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
        -----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
| bsd-3-clause |
beepee14/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to the first 5000 face images (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
dilawar/moose-full | moose-examples/neuroml/lobster_pyloric/STG_net.py | 2 | 5507 | ## Aditya Gilra, NCBS, Bangalore, 2013
"""
Stomatogastric ganglion Central Pattern Generator:
generates pyloric rhythm of the lobster
Network model translated from:
Prinz, Bucher, Marder, Nature Neuroscience, 2004;
STG neuron models translated from:
Prinz, Billimoria, Marder, J.Neurophys., 2003.
Translated into MOOSE by Aditya Gilra, Bangalore, 2013, revised 2014.
"""
#import os
#os.environ['NUMPTHREADS'] = '1'
import sys
sys.path.extend(['../../../python','synapses'])
import moose
from moose.utils import *
from moose.neuroml.NeuroML import NeuroML
import matplotlib.pyplot as plt
import numpy as np
simdt = 25e-6 # s
plotdt = 25e-6 # s
runtime = 10.0 # s
cells_path = '/cells' # neuromlR.readNeuroMLFromFile creates cells in '/cells'
def loadRunSTGNeuroML_L123(filename):
'Loads and runs the pyloric rhythm generator from NeuroML files.'
# for graded synapses, else NeuroML event-based are used
from load_synapses import load_synapses
moose.Neutral('/library')
# set graded to False to use event based synapses
# if False, neuroml event-based synapses get searched for and loaded
# True to load graded synapses
graded_syn = True
#graded_syn = False
if graded_syn:
load_synapses()
neuromlR = NeuroML()
    ## readNeuroMLFromFile below returns:
# populationDict = {
# 'populationname1':('cellName',{('instanceid1'):moosecell, ... })
# , ...
# }
# (cellName and instanceid are strings, mooosecell is a moose.Neuron object instance)
# and
# projectionDict = {
# 'projName1':('source','target',[('syn_name1','pre_seg_path','post_seg_path')
# ,...])
# , ...
# }
populationDict, projectionDict = \
neuromlR.readNeuroMLFromFile(filename)
soma1_path = populationDict['AB_PD'][1][0].path+'/Soma_0'
soma1Vm = setupTable('somaVm',moose.Compartment(soma1_path),'Vm')
soma2_path = populationDict['LP'][1][0].path+'/Soma_0'
soma2Vm = setupTable('somaVm',moose.Compartment(soma2_path),'Vm')
soma3_path = populationDict['PY'][1][0].path+'/Soma_0'
soma3Vm = setupTable('somaVm',moose.Compartment(soma3_path),'Vm')
# monitor channel current
channel_path = soma1_path + '/KCa_STG'
channel_Ik = setupTable('KCa_Ik',moose.element(channel_path),'Ik')
# monitor Ca
capool_path = soma1_path + '/CaPool_STG'
capool_Ca = setupTable('CaPool_Ca',moose.element(capool_path),'Ca')
# monitor synaptic current
soma2 = moose.element(soma2_path)
print "Children of",soma2_path,"are:"
for child in soma2.children:
print child.className, child.path
if graded_syn:
syn_path = soma2_path+'/DoubExpSyn_Ach__cells-0-_AB_PD_0-0-_Soma_0'
syn = moose.element(syn_path)
else:
syn_path = soma2_path+'/DoubExpSyn_Ach'
syn = moose.element(syn_path)
syn_Ik = setupTable('DoubExpSyn_Ach_Ik',syn,'Ik')
print "Reinit MOOSE ... "
resetSim(['/elec',cells_path], simdt, plotdt, simmethod='hsolve')
print "Using graded synapses? = ", graded_syn
print "Running model filename = ",filename," ... "
moose.start(runtime)
tvec = np.arange(0.0,runtime+2*plotdt,plotdt)
tvec = tvec[ : soma1Vm.vector.size ]
fig = plt.figure(facecolor='w',figsize=(10,6))
axA = plt.subplot2grid((3,2),(0,0),rowspan=3,colspan=1,frameon=False)
img = plt.imread( 'STG.png' )
imgplot = axA.imshow( img )
for tick in axA.get_xticklines():
tick.set_visible(False)
for tick in axA.get_yticklines():
tick.set_visible(False)
axA.set_xticklabels([])
axA.set_yticklabels([])
ax = plt.subplot2grid((3,2),(0,1),rowspan=1,colspan=1)
ax.plot(tvec,soma1Vm.vector*1000,label='AB_PD',color='g',linestyle='solid')
ax.set_xticklabels([])
ax.set_ylabel('AB_PD (mV)')
ax = plt.subplot2grid((3,2),(1,1),rowspan=1,colspan=1)
ax.plot(tvec,soma2Vm.vector*1000,label='LP',color='r',linestyle='solid')
ax.set_xticklabels([])
ax.set_ylabel('LP (mV)')
ax = plt.subplot2grid((3,2),(2,1),rowspan=1,colspan=1)
ax.plot(tvec,soma3Vm.vector*1000,label='PY',color='b',linestyle='solid')
ax.set_ylabel('PY (mV)')
ax.set_xlabel('time (s)')
fig.tight_layout()
fig = plt.figure(facecolor='w')
plt.plot(tvec,soma2Vm.vector*1000,label='LP',color='r',linestyle='solid')
plt.plot(tvec,soma3Vm.vector*1000,label='PY',color='b',linestyle='solid')
plt.legend()
plt.xlabel('time (s)')
plt.ylabel('Soma Vm (mV)')
plt.figure(facecolor='w')
plt.plot(tvec,channel_Ik.vector,color='b',linestyle='solid')
plt.title('KCa current; Ca conc')
plt.xlabel('time (s)')
plt.ylabel('Ik (Amp)')
plt.twinx()
plt.plot(tvec,capool_Ca.vector,color='r',linestyle='solid')
plt.ylabel('Ca (mol/m^3)')
plt.figure(facecolor='w')
plt.plot(tvec,syn_Ik.vector,color='b',linestyle='solid')
plt.title('Ach syn current in '+soma2_path)
plt.xlabel('time (s)')
plt.ylabel('Isyn (S)')
print "Showing plots ..."
plt.show()
filename = "Generated.net.xml"
if __name__ == "__main__":
'''
Inside the Demos/neuroml/lobster_ploric/ directory supplied with MOOSE, run
``python STG_net.py``
(other channels and morph xml files are already present in this same directory).
read the pdf documentation for a tutorial by Aditya Gilra.
'''
if len(sys.argv)>=2:
filename = sys.argv[1]
loadRunSTGNeuroML_L123(filename)
| gpl-2.0 |
lehinevych/Dato-Core | src/unity/python/graphlab/test/test_dataframe.py | 13 | 1711 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import unittest
import pandas
import array
from graphlab.cython.cy_dataframe import _dataframe
from pandas.util.testing import assert_frame_equal
class DataFrameTest(unittest.TestCase):
def test_empty(self):
expected = pandas.DataFrame()
assert_frame_equal(_dataframe(expected), expected)
expected['int'] = []
expected['float'] = []
expected['str'] = []
assert_frame_equal(_dataframe(expected), expected)
def test_simple_dataframe(self):
expected = pandas.DataFrame()
expected['int'] = [i for i in range(10)]
expected['float'] = [float(i) for i in range(10)]
expected['str'] = [str(i) for i in range(10)]
expected['unicode'] = [unicode(i) for i in range(10)]
expected['array'] = [array.array('d', [i]) for i in range(10)]
expected['ls'] = [[str(i)] for i in range(10)]
assert_frame_equal(_dataframe(expected), expected)
def test_sparse_dataframe(self):
expected = pandas.DataFrame()
expected['sparse_int'] = [i if i % 2 == 0 else None for i in range(10)]
expected['sparse_float'] = [float(i) if i % 2 == 1 else None for i in range(10)]
expected['sparse_str'] = [str(i) if i % 3 == 0 else None for i in range(10)]
expected['sparse_array'] = [array.array('d', [i]) if i % 5 == 0 else None for i in range(10)]
expected['sparse_list'] = [[str(i)] if i % 7 == 0 else None for i in range(10)]
assert_frame_equal(_dataframe(expected), expected)
| agpl-3.0 |
rtrwalker/geotecha | geotecha/consolidation/nogamiandli2003.py | 1 | 38248 | # geotecha - A software suite for geotechncial engineering
# Copyright (C) 2018 Rohan T. Walker ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/gpl.html.
"""
Nogami and Li (2003) 'Consolidation of Clay with a System of Vertical and
Horizontal Drains'.
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import geotecha.inputoutput.inputoutput as inputoutput
import math
import textwrap
import scipy.optimize
import geotecha.piecewise.piecewise_linear_1d as pwise
import cmath
import time
from geotecha.mathematics.root_finding import find_n_roots
import geotecha.plotting.one_d
import scipy.special
#from scipy.special import j0, y0, j1, y1
besselj = scipy.special.jn
bessely = scipy.special.yn
from geotecha.inputoutput.inputoutput import GenericInputFileArgParser
class NogamiAndLi2003(inputoutput.InputFileLoaderCheckerSaver):
"""Multi-layer vertical and radial consolidation using matrix transfer
Partially implements the article by Nogami and Li (2003) [1]_. While the
article includes special treatment for sand layers and geotextile layers,
this implementation only considers 'soil' layers. (Sand layers are
just normal layers with high kv and low mv).
    The coding is not of high quality. The main use is for verification of
    speccon models, noting that Nogami and Li (2003) use rigorous methods
    whereas speccon uses equal-strain assumptions for the radial flow part.
Features:
- Multiple layers.
- Vertical flow and radial flow to a central drain (no smear zone).
- Load is uniform with depth but varies piecewise-linear with time.
- No assumptions on radial distribution of strain (i.e. NOT equal-strain).
- pore pressure vs depth at various times. Either at a particular radial
coordinate or averaged in the radial direction.
- Average pore pressure vs time. Average is over the entire soil layer.
.. warning::
The 'Parameters' and 'Attributes' sections below require further
explanation. The parameters listed below are not used to explicitly
initialize the object. Rather they are defined in either a
multi-line string or a file-like object using python syntax.
It is the file object or string object that is used to initialize
the object. Each 'parameter' will be turned into an attribute that
can be accessed using conventional python dot notation, after the
object has been initialised. The attributes listed below are
calculated values (i.e. they could be interpreted as results) which
are accessible using dot notation after all calculations are
complete.
Parameters
----------
z : list/array of float
Depth to calc pore pressure at.
t : list/array of float
Time values to calc average pore pressure at.
tpor : list/array of float
Time values to calc pore pressure profiles at.
h : list/array of float
Layer thicknesses.
nv, nh : tuple of 2 int, optional
Number of series terms to use in vertical and horizontal direction.
Default nv=nh=5.
kv, kh : list/array of float
Layer vertical and horizontal permeability divided by unit weight of
water.
mv : list/array of float
Layer volume compressibility.
bctop, bcbot : [0, 1]
Boundary condition. bctop=0 is free draining, bctop=1 is
impervious.
surcharge_vs_time : PolyLine
Piecewise linear variation of surcharge with time
    r1, r0 : float, optional
        Drain influence zone radius and drain radius. If either is None then
        only vertical drainage will be considered.
rcalc : float, optional
Radial coordinate at which to calc pore pressure. Default rcalc=None
i.e. pore pressure is averaged in the radial direction.
radial_roots_x0 : float, optional
Starting point for finding radial eigenvalues.
Default radial_roots_x0=1e-3.
radial_roots_dx : float, optional
Starting increment for finding radial eigenvalues.
Default radial_roots_dx=1e-3.
radial_roots_p : float, optional
        Successive increment length increase factor for finding radial
        eigenvalues. Default radial_roots_p=1.05.
vertical_roots_x0 : float, optional
Starting point for finding vertical eigenvalues.
Default vertical_roots_x0=1e-7.
vertical_roots_dx : float, optional
Starting increment for finding vertical eigenvalues.
Default vertical_roots_dx=1e-7.
vertical_roots_p : float, optional
        Successive increment length increase factor for finding vertical
        eigenvalues. Default vertical_roots_p=1.05.
max_iter : int, optional
Max iterations when searching for eigenvalue intervals.
        Default max_iter=10000.
    show_vert_eigs : True/False, optional
        If True a vertical eigenvalue plot will be made.
        Default show_vert_eigs=False.
plot_properties : dict of dict, optional
dictionary that overrides some of the plot properties.
Each member of `plot_properties` will correspond to one of the plots.
================== ============================================
plot_properties description
================== ============================================
por dict of prop to pass to pore pressure plot.
avp dict of prop to pass to average pore
pressure plot.
set dict of prop to pass to settlement plot.
================== ============================================
see geotecha.plotting.one_d.plot_vs_depth and
geotecha.plotting.one_d.plot_vs_time for options to specify in
each plot dict.
save_data_to_file : True/False, optional
If True data will be saved to file. Default save_data_to_file=False
    save_figures_to_file : True/False, optional
        If True then figures will be saved to file.
        Default save_figures_to_file=False.
    show_figures : True/False, optional
        If True then figures will be shown on screen after the calculation.
Default show_figures=False.
directory : string, optional
Path to directory where files should be stored.
Default directory=None which
will use the current working directory. Note if you keep getting
directory does not exist errors then try putting an r before the
string definition. i.e. directory = r'C:\\Users\\...'
overwrite : True/False, optional
If True then existing files will be overwritten.
Default overwrite=False.
prefix : string, optional
Filename prefix for all output files. Default prefix= 'out'
    create_directory : True/False, optional
If True a new sub-folder with name based on `prefix` and an
incremented number will contain the output
files. Default create_directory=True.
data_ext : string, optional
File extension for data files. Default data_ext='.csv'
input_ext : string, optional
        File extension for original and parsed input files. Default input_ext='.py'.
figure_ext : string, optional
File extension for figures. Can be any valid matplotlib option for
savefig. Default figure_ext=".eps". Others include 'pdf', 'png'.
title : str, optional
A title for the input file. This will appear at the top of data files.
Default title=None, i.e. no title.
author : str, optional
Author of analysis. Default='unknown'.
Attributes
----------
por : array of shape (len(z), len(tpor))
Pore pressure vs depth at various times. Only present if tpor defined.
If rcalc defined then porepressure will be at r=rcalc. If rcalc is
not defined then pore pressure is averaged radially
avp : array of shape (1, len(t))
        Average pore pressure of profile at various times. Only present if t
defined. If rcalc defined then pore pressure will be at r=rcalc.
If rcalc is not defined then pore pressure is averaged radially
set : array of shape (1, len(t))
Surface settlement at various times. Only present if t
defined. If rcalc defined then settlement will be at r=rcalc.
If `rcalc` is not defined then settlement is averaged radially.
Notes
-----
It is possbile to initialize the object without a file-like object or
multi-line string, i.e. using the default reader=None. This is not
recommended because you have to explicitly set each attribute. It will
most likely be easier to use a string or file object and then do any
custom modifications to the attributes afterwards.
This program relies on numerical root finding, which can be extremely
    troublesome for the vertical eigenvalue case here (mainly because I
never figured out how to normalise in the z direction... but that is
another story). You will probably need to fine tune the vertical_roots
parameters to ensure the correct eigenvalues have been found, so:
    1. Run the program with the defaults. If it actually executes go to
       step 3.
    2. Increase a combo of `vertical_roots_dx`, `vertical_roots_p` and
       `max_iter` until the program executes.
    3. Do your pore pressure vs depth plots look ok? If yes, then possibly
       accept the results. But it is better to check the eigenvalues in
       step 4.
    4. Run the method `_plot_vert_roots` with enough points to smoothly
       show the characteristic curve. Zoom in on the roots and check if
       all the roots are found (usually the problems occur with the
       leftmost line). If not, alter `vertical_roots_dx`,
       `vertical_roots_p` and `max_iter` until all roots are captured.
       Basically if you choose `vertical_roots_dx` tiny,
       `vertical_roots_p`=1, and `max_iter` huge then you will find all
       the roots but it may take a long time.
Root finding is very hard when there are clumps of closely spaced roots
but the clumps themselves are far apart.
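    As a rough sketch (values are illustrative only and `my_input_string`
    stands for the multi-line input described above), a slower but more
    thorough vertical eigenvalue search can be requested by overriding the
    defaults after initialisation::
        a = NogamiAndLi2003(my_input_string)
        a.vertical_roots_dx = 1e-8   # smaller starting increment
        a.vertical_roots_p = 1.01    # near-uniform interval growth
        a.max_iter = 50000           # allow many more search intervals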
    Also note that there are errors in eq.24a and eq.24b in the published
    article of Nogami and Li. I could never get the vertical normalisation
    to work, and I've done my own normalising for the radial part.
See Also
--------
geotecha.piecewise.piecewise_linear_1d.PolyLine : How to specify loadings
References
----------
.. [1] Nogami, Toyoaki, and Maoxin Li. 'Consolidation of Clay with a
System of Vertical and Horizontal Drains'. Journal of
Geotechnical and Geoenvironmental Engineering 129, no. 9
(2003): 838-48. doi:10.1061/(ASCE)1090-0241(2003)129:9(838).
"""
def _setup(self):
"""This method overwrites the _setup method in
inputoutput.InputFileLoaderCheckerSaver
"""
self._attribute_defaults = {'bctop': 0, 'bcbot': 0,
'radial_roots_x0': 1e-3,
'radial_roots_dx': 1e-3,
'radial_roots_p': 1.05,
'vertical_roots_x0': 1e-7,
'vertical_roots_dx': 1e-7,
'vertical_roots_p': 1.05,
'max_iter': 10000,
'prefix': 'nl2003_',
'show_vert_eigs': False}
self._attributes = ('z t tpor nv nh h kv kh mv bctop bcbot '
'surcharge_vs_time r0 r1 rcalc '
'radial_roots_x0 radial_roots_dx radial_roots_p '
'vertical_roots_x0 vertical_roots_dx vertical_roots_p '
'max_iter show_vert_eigs' ).split()
self._attributes_that_should_have_same_len_pairs = [
'h kv'.split(),
'kv mv'.split(),
'h mv'.split(),
'h kh'.split(),
'h kv'.split(),
'h mv'.split()] #pairs that should have the same length
self._attributes_that_should_be_lists= []
self._attributes_that_should_have_same_x_limits = []
self.z = None
self.t = None
self.tpor = None
self.nv = self._attribute_defaults.get('nv', None)
self.nh = self._attribute_defaults.get('nh', None)
self.h = None
self.kv = None
self.kh = None
self.mv = None
self.bctop = self._attribute_defaults.get('bctop', None)
self.bcbot = self._attribute_defaults.get('bcbot', None)
self.r0 = None
self.r1 = None
self.rcalc = None
self.radial_roots_x0 = self._attribute_defaults.get('radial_roots_x0', None)
self.radial_roots_dx = self._attribute_defaults.get('radial_roots_dx', None)
self.radial_roots_p = self._attribute_defaults.get('radial_roots_p', None)
self.vertical_roots_x0 = self._attribute_defaults.get('vertical_roots_x0', None)
self.vertical_roots_dx = self._attribute_defaults.get('vertical_roots_dx', None)
self.vertical_roots_p = self._attribute_defaults.get('vertical_roots_p', None)
self.max_iter = self._attribute_defaults.get('max_iter', None)
self.show_vert_eigs=self._attribute_defaults.get('show_vert_eigs', None)
self.surcharge_vs_time = None
self._zero_or_all = [
'h kv mv'.split(),
'r0 r1'.split()]
self._at_least_one = [
['mv'],
['surcharge_vs_time'],
'kv kh'.split(),
'tpor t'.split(),]
self._one_implies_others = ['r0 r1 kh nh'.split(),
'r1 r0 kh nh'.split(),
'kh r0 r1 nh'.split(),
'nh kh r0 r1'.split()]
def _calc_derived_properties(self):
"""Calculate properties/ratios derived from input"""
self.check_input_attributes()
# if self.rcalc is None:
# self.rcalc=self.r1
if not self.t is None:
self.t = np.asarray(self.t)
self.z = np.asarray(self.z)
self.kv = np.asarray(self.kv)
self.mv = np.asarray(self.mv)
if not self.kh is None:
self.kh = np.asarray(self.kh)
self.ch = self.kh / self.mv
else:
self.nh = 1
self.ch = np.zeros_like(self.mv)
self.h = np.asarray(self.h)
self.nlayers = len(self.kv)
self.zlayer = np.cumsum(self.h)
self.cv = self.kv / self.mv
# self.use_normalised = True
        #use_normalised is only relevant for the radial component.
        #I couldn't get the non-normalised form to work, hence it is hard
        #coded rather than being a user variable. I could never get the
        #vertical normalisation working, so I gave up trying.
if self.bctop == 0:
self.phia0 = np.array([0.0, 1.0])
elif self.bctop == 1:
self.phia0 = np.array([1.0, 0.0])
#index of phib at bottom to check
if self.bcbot == 0:
self.phi_i_check = 0
elif self.bcbot == 1:
self.phi_i_check = 1
def un_normalised_average(self,s):
"""u(r) part of u(r, z, t) = u(r) * phi(z) * T(t), averaged betw r0 r1
"""
r0 = self.r0
r1 = self.r1
nn = r1/r0
return -self.un_normalised(r0,s,1)*2 / (nn**2-1)/s
def un_normalised(self, r, s, order):
"""u(r) part of u(r, z, t) = u(r) * phi(z) * T(t)
This version is normalised w.r.t. r0
"""
r0 = self.r0
r1 = self.r1
return besselj(order, r/r0*s)*bessely(0, s) - besselj(0, s)*bessely(order, r/r0*s)
def _radial_characteristic_curve(self, s):
"""Zeros of this function provide the radial eigenvalues"""
r0 = self.r0
r1 = self.r1
return self.un_normalised(r1, s, 1)
def _find_sn(self):
"""Find the radial eigenvalues"""
if self.kh is None:
self._sn=np.array([0])
else:
self._sn = find_n_roots(self._radial_characteristic_curve,
n=self.nh,x0=self.radial_roots_x0,
dx=self.radial_roots_dx,
p = self.radial_roots_p, max_iter=self.max_iter)
def _beta(self, alp, s):
"""beta from alp**2 = cv*beta**2 + cr * s**2 / r0**2"""
if self.r0 is None:
r0=1
else:
r0 = self.r0
a = 1/self.cv * alp**2 -(self.ch/self.cv)*s**2/r0**2
return np.sqrt(np.array(a, dtype=complex))
def _calc_phia_and_phidota(self):
"""Determine the pore pressure and pore pressure gradient at top of
each layer
Can only be done after finding alp
"""
sin = cmath.sin
cos = cmath.cos
self._phia = np.zeros((self.nh, self.nv, self.nlayers), dtype=float)
self._phidota = np.zeros((self.nh, self.nv, self.nlayers), dtype=float)
self._phia[:,:,0] = self.phia0[0]
self._phidota[:,:,0] = self.phia0[1]
# print('o'*40)
for i in range(self.nh):
s = self._sn[i]
square = np.zeros((2,2), dtype=complex)
for j in range(self.nv):
alp = self._alp[i, j]
phia = np.array([self.phia0[0], self.phia0[1]], dtype=complex)
# print(i, 's=', s)
# print(j, 'alp=', alp)
for k in range(self.nlayers):
h = self.h[k]
beta = self._betamn[i, j, k]
if cmath.polar(beta)[0]==0:
phib = np.array([phia[0],0], dtype=complex)
# phib[0] = phia[0]
# phib[1] = 0+0j
else:
square[0,0] = cos(beta*h)
square[0,1] = sin(beta*h) / beta
square[1,0] = -beta*sin(beta*h)
square[1,1] = cos(beta*h)
phib = np.dot(square, phia)
# print(k, beta, phia, phib)
if k != self.nlayers-1: # we are not in the last layer
# transfer phib to next layers phia
phia[0] = phib[0]
phia[1] = phib[1] * self.kv[k] / self.kv[k+1]
self._phia[i,j,k + 1] = phia[0].real
self._phidota[i, j, k+1] = phia[1].real#phib[1] * self.kv[i] / self.kv[i+1]
#check
# print('_', alp, 's', phib.real)
if abs(phib[self.phi_i_check].real)>0.1:
pass
print('bottom BC not satisfied. ih=',i,'jv=', j )
# raise ValueError('Bottom BC not satisfied')
def _vertical_characteristic_curve(self, alp, s):
"""the roots of this function will give the vertical eigenvalues
Parameters
----------
alp : float
alp common for all layers
s : float
radial eigen value
"""
sin = cmath.sin
cos = cmath.cos
phia = np.array([self.phia0[0], self.phia0[1]], dtype=complex)
square = np.zeros((2,2), dtype=complex)
beta = self._beta(alp, s)
# print("*", 's=', s)
# print('alp=', alp)
for i, h in enumerate(self.h):
if cmath.polar(beta[i])[0]==0:
phib = np.array([phia[0], 0], dtype=complex)
# phib[0] = phia[0]
# phib[1] = 0+0j
else:
square[0,0] = cos(beta[i]*h)
square[0,1] = sin(beta[i]*h) / beta[i]
square[1,0] = -beta[i]* sin(beta[i]*h)
square[1,1] = cos(beta[i]*h)
phib = np.dot(square, phia)
# print(i, beta[i], phia, phib)
if i != self.nlayers - 1: # we are not in the last layer
#transfer phib to the next layer phia
phia[0]= phib[0]
phia[1] = phib[1] * self.kv[i] / self.kv[i+1]
ret = phib[self.phi_i_check].real
return ret
def _find_alp(self):
"""find alp by matrix transfer method"""
self._alp = np.zeros((self.nh, self.nv), dtype=float)
for n, s in enumerate(self._sn):
if s==0:
alp_start_offset = min(1e-7, self.vertical_roots_dx)
else:
alp_start_offset = 0
if n==0:
alp=self.vertical_roots_x0
else:
alp = self._alp[n-1,0]
self._alp[n,:] = find_n_roots(self._vertical_characteristic_curve,
args=(s,),n= self.nv, x0 = alp+alp_start_offset,
dx = self.vertical_roots_dx, p = self.vertical_roots_p,
max_iter=self.max_iter, fsolve_kwargs={})
def _calc_Cn(self):
"""Calc Cn part of the coefficient Cmn"""
self._Cn = np.zeros(self.nh, dtype=float)
if self.kh is None:
self._Cn[:]=1
return
r0 = self.r0
r1 = self.r1
for n, s in enumerate(self._sn):
numer = -r0**2/s * self.un_normalised(r0, s, 1)
denom = r0**2/2 * (r1**2/r0**2 * self.un_normalised(r1, s, 0)**2 -
self.un_normalised(r0, s, 1)**2)
self._Cn[n] = numer / denom
def _calc_betamn(self):
"""calc beta for each layer and each eigenvalue combination"""
self._betamn = np.zeros((self.nh, self.nv, self.nlayers), dtype=complex)
for i in range(self.nh):
s = self._sn[i]
for j in range(self.nv):
alp = self._alp[i, j]
self._betamn[i,j,:] = self._beta(alp, s)
def _calc_Amn_and_Bmn(self):
"""calc coefficeints Amn and Bmn for each layer and eigenvalue
combination"""
sin = cmath.sin
cos = cmath.cos
self._Amn = np.zeros((self.nh, self.nv, self.nlayers), dtype=complex)
self._Bmn = np.zeros((self.nh, self.nv, self.nlayers), dtype=complex)
for i in range(self.nh):
s = self._sn[i]
square = np.zeros((2,2), dtype=float)
for j in range(self.nv):
alp = self._alp[i, j]
phia = self.phia0
for k in range(self.nlayers):
h = self.h[k]
bet = self._betamn[i, j, k]
phi_a = self._phia[i, j, k]
phi_a_dot = self._phidota[i, j, k]
if cmath.polar(bet)[0]==0:
self._Amn[i,j,k] = h*phi_a
self._Bmn[i,j,k] = h*phi_a**2
else:
self._Amn[i,j,k] = (phi_a*sin(bet*h)/bet -
phi_a_dot*cos(bet*h)/bet**2 + phi_a_dot/bet**2)
self._Bmn[i,j,k] = (h*phi_a**2*sin(bet*h)**2/2 +
h*phi_a**2*cos(bet*h)**2/2 +
phi_a**2*sin(bet*h)*cos(bet*h)/(2*bet) +
h*phi_a_dot**2*sin(bet*h)**2/(2*bet**2) +
h*phi_a_dot**2*cos(bet*h)**2/(2*bet**2) -
phi_a*phi_a_dot*cos(bet*h)**2/bet**2 +
phi_a*phi_a_dot/bet**2 -
phi_a_dot**2*sin(bet*h)*cos(bet*h)/(2*bet**3))
def _calc_Cm(self):
"""Calc Cm part of the coefficient Cmn"""
self._calc_Amn_and_Bmn()
self._Cm = np.zeros((self.nh, self.nv), dtype=complex)
for i in range(self.nh):
s = self._sn[i]
for j in range(self.nv):
alp = self._alp[i, j]
# phia = self.phia0
numer = 0.0
denom = 0.0
for k in range(self.nlayers):
h = self.h[k]
mv = self.mv[k]
Amn = self._Amn[i,j,k]
Bmn = self._Bmn[i,j,k]
numer += mv*Amn
denom += mv*Bmn
self._Cm[i,j] = numer / denom
def _calc_Cmn(self):
"""calc the coefficient Cmn = Cm * Cn"""
self._calc_Cn()
self._calc_Cm()
self._Cmn = np.zeros((self.nh, self.nv), dtype=complex)
for i in range(self.nh):
Cn = self._Cn[i]
for j in range(self.nv):
Cm = self._Cm[i,j]
self._Cmn[i,j] = Cm * Cn
def calc(self):
"""Perform all calculations"""
self._calc_derived_properties()
self._find_sn()
self._find_alp()
self._calc_betamn()
self._calc_phia_and_phidota()
self._calc_Cmn()
self._calc_por()
return
def make_all(self):
"""Check input, make_output produce files and plots"""
self.check_input_attributes()
self.make_output()
if getattr(self, 'save_data_to_file', False):
self._save_data()
if (getattr(self, 'save_figures_to_file', False) or
getattr(self, 'show_figures', False)):
self.produce_plots()
if getattr(self, 'save_figures_to_file', False):
self._save_figures()
if getattr(self, 'show_figures', False):
plt.show()
def make_output(self):
"""make all output"""
self._calc_derived_properties()
self._find_sn()
self._find_alp()
self._calc_betamn()
self._calc_phia_and_phidota()
self._calc_Cmn()
# self._calc_por()
header1 = "program: nogamiandli2003; geotecha version: {}; author: {}; date: {}\n".format(self.version, self.author, time.strftime('%Y/%m/%d %H:%M:%S'))
if not self.title is None:
header1 += "{}\n".format(self.title)
if not self.rcalc is None:
extra = " at r={0:.3g}".format(self.rcalc)
else:
extra=""
self._grid_data_dicts = []
if not self.tpor is None:
self._calc_por()
labels = ['{:.3g}'.format(v) for v in self.z]
d = {'name': '_data_por',
'data': self.por.T,
'row_labels': self.tpor,
'row_labels_label': 'Time',
'column_labels': labels,
'header': header1 + 'Pore pressure at depth'+extra}
self._grid_data_dicts.append(d)
if not self.t is None:
self._calc_avp()
labels = ['{:.3g} to {:.3g}'.format(0, sum(self.h))]
d = {'name': '_data_avp',
'data': self.avp.T,
'row_labels': self.t,
'row_labels_label': 'Time',
'column_labels': labels,
'header': header1 + 'Average pore pressure between depths' + extra}
self._grid_data_dicts.append(d)
labels = ['{:.3g} to {:.3g}'.format(0, sum(self.h))]
d = {'name': '_data_set',
'data': self.avp.T,
'row_labels': self.t,
'row_labels_label': 'Time',
'column_labels': labels,
'header': header1 + 'settlement between depths' + extra}
self._grid_data_dicts.append(d)
return
def produce_plots(self):
"""produce plots of analysis"""
# geotecha.plotting.one_d.pleasing_defaults()
# matplotlib.rcParams['figure.dpi'] = 80
# matplotlib.rcParams['savefig.dpi'] = 80
matplotlib.rcParams.update({'font.size': 11})
matplotlib.rcParams.update({'font.family': 'serif'})
self._figures=[]
#por
if not self.tpor is None:
f=self._plot_por()
title = 'fig_por'
f.set_label(title)
f.canvas.manager.set_window_title(title)
self._figures.append(f)
if not self.t is None:
f=self._plot_avp()
title = 'fig_avp'
f.set_label(title)
f.canvas.manager.set_window_title(title)
self._figures.append(f)
f=self._plot_set()
title = 'fig_set'
f.set_label(title)
f.canvas.manager.set_window_title(title)
self._figures.append(f)
if self.show_vert_eigs:
f = self._plot_vert_roots(1000)
title = 'vertical characteristic curve and eigs'
f.set_label(title)
f.canvas.manager.set_window_title(title)
self._figures.append(f)
def _plot_por(self):
"""plot depth vs pore pressure for various times
"""
if not self.rcalc is None:
extra = " at r={0:.3g}".format(self.rcalc)
else:
extra=" (radial average)"
t = self.tpor
line_labels = ['{:.3g}'.format(v) for v in t]
por_prop = self.plot_properties.pop('por', dict())
if not 'xlabel' in por_prop:
por_prop['xlabel'] = 'Pore pressure'+extra
#to do
fig_por = geotecha.plotting.one_d.plot_vs_depth(self.por, self.z,
line_labels=line_labels,
prop_dict=por_prop)
return fig_por
def _plot_avp(self):
"""plot average pore pressure of profile"""
if not self.rcalc is None:
extra = " at r={0:.3g}".format(self.rcalc)
else:
extra=" (radial average)"
t = self.t
line_labels = ['{:.3g} to {:.3g}'.format(0, sum(self.h))]
avp_prop = self.plot_properties.pop('avp', dict())
if not 'ylabel' in avp_prop:
avp_prop['ylabel'] = 'Average pore pressure'+extra
fig_avp = geotecha.plotting.one_d.plot_vs_time(t, self.avp.T,
line_labels=line_labels,
prop_dict=avp_prop)
return fig_avp
def _plot_set(self):
"""plot surface settlement"""
if not self.rcalc is None:
extra = " at r={0:.3g}".format(self.rcalc)
else:
extra=" (radial average)"
t = self.t
line_labels = ['{:.3g} to {:.3g}'.format(0, sum(self.h))]
set_prop = self.plot_properties.pop('set', dict())
if not 'ylabel' in set_prop:
set_prop['ylabel'] = 'surface settlement'+extra
fig_set = geotecha.plotting.one_d.plot_vs_time(t, self.set.T,
line_labels=line_labels,
prop_dict=set_prop)
fig_set.gca().invert_yaxis()
return fig_set
def _calc_Tm(self, alp, t):
"""calculate the Tm expression at a given time
Parameters
----------
alp : float
eigenvalue for layer
t : float
time value
Returns
-------
Tm: float
            time dependent function
"""
loadmag = self.surcharge_vs_time.y
loadtim = self.surcharge_vs_time.x
(ramps_less_than_t, constants_less_than_t, steps_less_than_t,
ramps_containing_t, constants_containing_t) = pwise.segment_containing_also_segments_less_than_xi(loadtim, loadmag, t, steps_or_equal_to = True)
exp = math.exp
Tm=0
cv = 1 # I copied the Tm function from SchiffmanAndStein1970
i=0 #only one time value
for k in steps_less_than_t[i]:
sig1 = loadmag[k]
sig2 = loadmag[k+1]
Tm += (sig2-sig1)*exp(-cv * alp**2 * (t-loadtim[k]))
for k in ramps_containing_t[i]:
sig1 = loadmag[k]
sig2 = loadmag[k+1]
t1 = loadtim[k]
t2 = loadtim[k+1]
# Tm += (-sig1 + sig2)/(alp**2*cv*(-t1 + t2)) - (-sig1 + sig2)*exp(-alp**2*cv*t)*exp(alp**2*cv*t1)/(alp**2*cv*(-t1 + t2))
Tm += (-sig1 + sig2)/(alp**2*cv*(-t1 + t2)) - (-sig1 + sig2)*exp(-alp**2*cv*(t-t1))/(alp**2*cv*(-t1 + t2))
for k in ramps_less_than_t[i]:
sig1 = loadmag[k]
sig2 = loadmag[k+1]
t1 = loadtim[k]
t2 = loadtim[k+1]
# Tm += -(-sig1 + sig2)*exp(-alp**2*cv*t)*exp(alp**2*cv*t1)/(alp**2*cv*(-t1 + t2)) + (-sig1 + sig2)*exp(-alp**2*cv*t)*exp(alp**2*cv*t2)/(alp**2*cv*(-t1 + t2))
Tm += -(-sig1 + sig2)*exp(-alp**2*cv*(t-t1))/(alp**2*cv*(-t1 + t2)) + (-sig1 + sig2)*exp(-alp**2*cv*(t-t2))/(alp**2*cv*(-t1 + t2))
return Tm
def _calc_un(self):
"""u(r) part of u(r, z, t) = u(r) * phi(z) * T(t)"""
self._un = np.ones_like(self._sn)
if not self.kh is None:
for i, s in enumerate(self._sn):
if not self.rcalc is None:
self._un[i] = self.un_normalised(self.rcalc, s, 0)
else:
self._un[i] = self.un_normalised_average(s)
def _calc_por(self):
"""calculate the pore pressure"""
sin = cmath.sin
cos = cmath.cos
# if self.tpor is None:
# self.tpor==self.t
if self.tpor is None:
return
self.por = np.zeros((len(self.z), len(self.tpor)), dtype=float)
z_in_layer = np.searchsorted(self.zlayer, self.z)
self._calc_un()
for p, t in enumerate(self.tpor):
for i in range(self.nh):
s = self._sn[i]
un = self._un[i]
for j in range(self.nv):
alp = self._alp[i, j]
Tm = self._calc_Tm(alp, t)
for k, z in enumerate(self.z):
layer = z_in_layer[k]
zlay = z - (self.zlayer[layer] - self.h[layer])
bet = self._betamn[i, j, layer]
Cmn = self._Cmn[i, j].real
phi_a = self._phia[i, j, layer]
phi_a_dot = self._phidota[i, j, layer]
phi = (cos(bet * zlay) * phi_a +
sin(bet * zlay)/bet * phi_a_dot)
self.por[k, p] += Cmn * un * phi.real * Tm
def _calc_avp(self):
"""calculate the average pore pressure"""
sin = cmath.sin
cos = cmath.cos
h_all = sum(self.h)
if self.t is None:
return
self.avp = np.zeros((1, len(self.t)), dtype=float)
self.set = np.zeros((1, len(self.t)), dtype=float)
z_in_layer = np.searchsorted(self.zlayer, self.z)
self._calc_un()
for p, t in enumerate(self.t):
for i in range(self.nh):
s = self._sn[i]
un = self._un[i]
for j in range(self.nv):
alp = self._alp[i, j]
Tm = self._calc_Tm(alp, t)
load = pwise.pinterp_x_y(self.surcharge_vs_time, t)
for layer, h in enumerate(self.h):
# layer = z_in_layer[k]
# zlay = z - (self.zlayer[layer] - self.h[layer])
bet = self._betamn[i, j, layer]
Cmn = self._Cmn[i, j].real
phi_a = self._phia[i, j, layer]
phi_a_dot = self._phidota[i, j, layer]
phi = (sin(bet * h) / bet * phi_a +
(1-cos(bet * h))/bet**2*phi_a_dot)
self.avp[0, p] += Cmn * un * phi.real / h_all * Tm
self.set[0, p] += self.mv[layer] * (load * h /self.nh/self.nv - Cmn *
un * phi.real * Tm)
def _plot_vert_roots(self, npt=200):
"""Plot the vertical characteristic curve and it's roots
After a 'successful' run, use this to check the validity
of the calculated vertical eigenvalues and ensure none are missing
Parameters
----------
npt : int, optional
number of points to plot. default=200
"""
fig = plt.figure(figsize=(40, 8))
ax = fig.add_subplot('111')
for i in range(self.nh):
s = self._sn[i]
# amin=self._alp_min()[i]
# amin=0.001
amin = 0.3*self._alp[i,0]
x = np.linspace(amin, self._alp[i,-1], npt)
y = np.zeros_like(x)
for j,_x in enumerate(x):
y[j] = self._vertical_characteristic_curve(_x, s)
# print(x[i],y[i])
# print(y)
ax.plot(x, y, ls='-', marker='.', markersize=3)
c = ax.get_lines()[-1].get_color()
ax.set_ylim((-1,1))
ax.plot(self._alp[i,:], np.zeros_like(self._alp[i,:]), 'o', color=c)
ax.set_title('vertical_roots_x0={}, vertical_roots_dx={}, vertical_roots_p={}'.format(self.vertical_roots_x0, self.vertical_roots_dx, self.vertical_roots_p))
ax.set_xlabel('beta')
        ax.set_ylabel('value of characteristic curve')
ax.grid()
fig.tight_layout()
return fig
def main():
"""Run nogamiandli2003 as script"""
a = GenericInputFileArgParser(obj=NogamiAndLi2003,
methods=[('make_all', [], {})],
pass_open_file=True)
a.main()
if __name__ == '__main__':
# import nose
# nose.runmodule(argv=['nose', '--verbosity=3', '--with-doctest'])
## nose.runmodule(argv=['nose', '--verbosity=3'])
main()
| gpl-3.0 |
weixuanfu/tpot | tpot/builtins/stacking_estimator.py | 1 | 3440 | # -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson ([email protected])
- Weixuan Fu ([email protected])
- Daniel Angell ([email protected])
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin, is_classifier
from sklearn.utils import check_array
class StackingEstimator(BaseEstimator, TransformerMixin):
"""Meta-transformer for adding predictions and/or class probabilities as synthetic feature(s).
Parameters
----------
estimator : object
The base estimator from which the transformer is built.
"""
def __init__(self, estimator):
"""Create a StackingEstimator object.
Parameters
----------
estimator: object with fit, predict, and predict_proba methods.
The estimator to generate synthetic features from.
"""
self.estimator = estimator
def fit(self, X, y=None, **fit_params):
"""Fit the StackingEstimator meta-transformer.
Parameters
----------
X: array-like of shape (n_samples, n_features)
The training input samples.
y: array-like, shape (n_samples,)
The target values (integers that correspond to classes in classification, real numbers in regression).
fit_params:
Other estimator-specific parameters.
Returns
-------
self: object
Returns a copy of the estimator
"""
self.estimator.fit(X, y, **fit_params)
return self
def transform(self, X):
"""Transform data by adding two synthetic feature(s).
Parameters
----------
X: numpy ndarray, {n_samples, n_components}
New data, where n_samples is the number of samples and n_components is the number of components.
Returns
-------
X_transformed: array-like, shape (n_samples, n_features + 1) or (n_samples, n_features + 1 + n_classes) for classifier with predict_proba attribute
The transformed feature set.
"""
X = check_array(X)
X_transformed = np.copy(X)
# add class probabilities as a synthetic feature
if is_classifier(self.estimator) and hasattr(self.estimator, 'predict_proba'):
y_pred_proba = self.estimator.predict_proba(X)
# check all values that should be not infinity or not NAN
if np.all(np.isfinite(y_pred_proba)):
X_transformed = np.hstack((y_pred_proba, X))
# add class prediction as a synthetic feature
X_transformed = np.hstack((np.reshape(self.estimator.predict(X), (-1, 1)), X_transformed))
return X_transformed
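# A minimal, hedged usage sketch (not part of the original module): the
# transformer is normally placed early in a scikit-learn Pipeline so that a
# downstream estimator sees the base model's predictions (and probabilities)
# as extra input columns.  The dataset and estimators below are illustrative
# assumptions only.
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import make_pipeline
    from sklearn.tree import DecisionTreeClassifier
    X_demo, y_demo = load_iris(return_X_y=True)
    stacked = make_pipeline(
        StackingEstimator(estimator=DecisionTreeClassifier(max_depth=2)),
        LogisticRegression(),
    )
    stacked.fit(X_demo, y_demo)
    print(stacked.score(X_demo, y_demo))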
| lgpl-3.0 |
automl/paramsklearn | ParamSklearn/components/classification/bernoulli_nb.py | 1 | 4101 | import numpy as np
import sklearn.naive_bayes
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
CategoricalHyperparameter
from ParamSklearn.components.base import ParamSklearnClassificationAlgorithm
from ParamSklearn.constants import *
class BernoulliNB(ParamSklearnClassificationAlgorithm):
def __init__(self, alpha, fit_prior, random_state=None, verbose=0):
self.alpha = alpha
if fit_prior.lower() == "true":
self.fit_prior = True
elif fit_prior.lower() == "false":
self.fit_prior = False
else:
self.fit_prior = fit_prior
self.random_state = random_state
self.verbose = int(verbose)
self.estimator = None
def fit(self, X, y):
while not self.configuration_fully_fitted():
self.iterative_fit(X, y, n_iter=1)
return self
def iterative_fit(self, X, y, n_iter=1, refit=False):
if refit:
self.estimator = None
if self.estimator is None:
self.n_iter = 0
self.fully_fit_ = False
self.estimator = sklearn.naive_bayes.BernoulliNB(
alpha=self.alpha, fit_prior=self.fit_prior)
self.classes_ = np.unique(y.astype(int))
for iter in range(n_iter):
start = min(self.n_iter * 1000, y.shape[0])
stop = min((self.n_iter + 1) * 1000, y.shape[0])
# Upper limit, scipy.sparse doesn't seem to handle max > len(matrix)
stop = min(stop, y.shape[0])
self.estimator.partial_fit(X[start:stop], y[start:stop], self.classes_)
self.n_iter += 1
if stop >= len(y):
self.fully_fit_ = True
break
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
elif not hasattr(self, 'fully_fit_'):
return False
else:
return self.fully_fit_
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'BernoulliNB',
'name': 'Bernoulli Naive Bayes classifier',
'handles_missing_values': False,
'handles_nominal_values': False,
# sklearn website says: ... BernoulliNB is designed for
# binary/boolean features.
'handles_numerical_features': False,
'prefers_data_scaled': False,
'prefers_data_normalized': False,
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'handles_sparse': False,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,),
'preferred_dtype': np.bool}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
# the smoothing parameter is a non-negative float
        # I will limit it to 100 and put it on a logarithmic scale. (SF)
# Please adjust that, if you know a proper range, this is just a guess.
alpha = UniformFloatHyperparameter(name="alpha", lower=1e-2, upper=100,
default=1, log=True)
fit_prior = CategoricalHyperparameter(name="fit_prior",
choices=["True", "False"],
default="True")
cs.add_hyperparameter(alpha)
cs.add_hyperparameter(fit_prior)
return cs
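# A hedged, stand-alone sketch (added for illustration, not part of
# ParamSklearn): iterative_fit above streams the data to sklearn's
# BernoulliNB in chunks of 1000 rows via partial_fit.  The loop below
# reproduces that pattern with plain scikit-learn; the toy data and chunk
# size are assumptions chosen only for the demo.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randint(0, 2, size=(5000, 20))
    y_demo = rng.randint(0, 2, size=5000)
    nb = sklearn.naive_bayes.BernoulliNB(alpha=1.0)
    classes = np.unique(y_demo)
    for start in range(0, X_demo.shape[0], 1000):
        stop = min(start + 1000, X_demo.shape[0])
        nb.partial_fit(X_demo[start:stop], y_demo[start:stop], classes=classes)
    print(nb.score(X_demo, y_demo))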
| bsd-3-clause |
MediffRobotics/DeepRobotics | DeepLearnMaterials/tutorials/matplotlibTUT/plt2_install.py | 3 | 1257 | # View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 2 - install
"""
Make sure you have installed numpy.
------------------------------
INSTALL on Linux:
If you have python3, in terminal you will type:
$ sudo apt-get install python3-matplotlib
Otherwise, if python2, type:
$ sudo apt-get install python-matplotlib
-------------------------------
INSTALL on MacOS
For python3:
$ pip3 install matplotlib
For python2:
$ pip install matplotlib
--------------------------------
INSTALL on Windows:
1. make sure you install Visual Studio;
2. go to: https://pypi.python.org/pypi/matplotlib/
3. find the wheel file (a file ending in .whl) matches your python version and system
(e.g. cp35 for python3.5, win32 for 32-bit system, win_amd64 for 64-bit system);
4. Copy the .whl file to your project folder, open a command window,
and navigate to the project folder. Then use pip to install matplotlib:
e.g.
> cd python_work
python_work> python -m pip install matplotlib-1.4.3-cp35-none-win32.whl
If that does not succeed, try the alternative way: using "Anaconda" to install.
Please search this by yourself.
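--------------------------------
CHECK the install (optional):
One simple way to confirm matplotlib is importable is:
$ python -c "import matplotlib; print(matplotlib.__version__)"
If a version number is printed, the install worked.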
""" | gpl-3.0 |
perrygeo/geopandas | tests/test_geocode.py | 7 | 5045 | from __future__ import absolute_import
from fiona.crs import from_epsg
import pandas as pd
import pandas.util.testing as tm
from shapely.geometry import Point
import geopandas as gpd
import nose
from geopandas import GeoSeries
from geopandas.tools import geocode, reverse_geocode
from geopandas.tools.geocoding import _prepare_geocode_result
from .util import unittest, mock, assert_geoseries_equal
def _skip_if_no_geopy():
try:
import geopy
except ImportError:
raise nose.SkipTest("Geopy not installed. Skipping tests.")
except SyntaxError:
raise nose.SkipTest("Geopy is known to be broken on Python 3.2. "
"Skipping tests.")
class ForwardMock(mock.MagicMock):
"""
Mock the forward geocoding function.
Returns the passed in address and (p, p+.5) where p increases
at each call
"""
def __init__(self, *args, **kwargs):
super(ForwardMock, self).__init__(*args, **kwargs)
self._n = 0.0
def __call__(self, *args, **kwargs):
self.return_value = args[0], (self._n, self._n + 0.5)
self._n += 1
return super(ForwardMock, self).__call__(*args, **kwargs)
class ReverseMock(mock.MagicMock):
"""
Mock the reverse geocoding function.
Returns the passed in point and 'address{p}' where p increases
at each call
"""
def __init__(self, *args, **kwargs):
super(ReverseMock, self).__init__(*args, **kwargs)
self._n = 0
def __call__(self, *args, **kwargs):
self.return_value = 'address{0}'.format(self._n), args[0]
self._n += 1
return super(ReverseMock, self).__call__(*args, **kwargs)
class TestGeocode(unittest.TestCase):
def setUp(self):
_skip_if_no_geopy()
self.locations = ['260 Broadway, New York, NY',
'77 Massachusetts Ave, Cambridge, MA']
self.points = [Point(-71.0597732, 42.3584308),
Point(-77.0365305, 38.8977332)]
def test_prepare_result(self):
# Calls _prepare_result with sample results from the geocoder call
# loop
p0 = Point(12.3, -45.6) # Treat these as lat/lon
p1 = Point(-23.4, 56.7)
d = {'a': ('address0', p0.coords[0]),
'b': ('address1', p1.coords[0])}
df = _prepare_geocode_result(d)
assert type(df) is gpd.GeoDataFrame
self.assertEqual(from_epsg(4326), df.crs)
self.assertEqual(len(df), 2)
self.assert_('address' in df)
coords = df.loc['a']['geometry'].coords[0]
test = p0.coords[0]
# Output from the df should be lon/lat
self.assertAlmostEqual(coords[0], test[1])
self.assertAlmostEqual(coords[1], test[0])
coords = df.loc['b']['geometry'].coords[0]
test = p1.coords[0]
self.assertAlmostEqual(coords[0], test[1])
self.assertAlmostEqual(coords[1], test[0])
def test_prepare_result_none(self):
p0 = Point(12.3, -45.6) # Treat these as lat/lon
d = {'a': ('address0', p0.coords[0]),
'b': (None, None)}
df = _prepare_geocode_result(d)
assert type(df) is gpd.GeoDataFrame
self.assertEqual(from_epsg(4326), df.crs)
self.assertEqual(len(df), 2)
self.assert_('address' in df)
row = df.loc['b']
self.assertEqual(len(row['geometry'].coords), 0)
self.assert_(pd.np.isnan(row['address']))
def test_bad_provider_forward(self):
with self.assertRaises(ValueError):
geocode(['cambridge, ma'], 'badprovider')
def test_bad_provider_reverse(self):
with self.assertRaises(ValueError):
reverse_geocode(['cambridge, ma'], 'badprovider')
def test_forward(self):
with mock.patch('geopy.geocoders.googlev3.GoogleV3.geocode',
ForwardMock()) as m:
g = geocode(self.locations, provider='googlev3', timeout=2)
self.assertEqual(len(self.locations), m.call_count)
n = len(self.locations)
self.assertIsInstance(g, gpd.GeoDataFrame)
expected = GeoSeries([Point(float(x) + 0.5, float(x)) for x in range(n)],
crs=from_epsg(4326))
assert_geoseries_equal(expected, g['geometry'])
tm.assert_series_equal(g['address'],
pd.Series(self.locations, name='address'))
def test_reverse(self):
with mock.patch('geopy.geocoders.googlev3.GoogleV3.reverse',
ReverseMock()) as m:
g = reverse_geocode(self.points, provider='googlev3', timeout=2)
self.assertEqual(len(self.points), m.call_count)
self.assertIsInstance(g, gpd.GeoDataFrame)
expected = GeoSeries(self.points, crs=from_epsg(4326))
assert_geoseries_equal(expected, g['geometry'])
address = pd.Series(['address' + str(x) for x in range(len(self.points))],
name='address')
tm.assert_series_equal(g['address'], address)
| bsd-3-clause |
anurag313/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
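# A small addition for illustration (not in the original example): the
# cross-validated score of the best parameter combination is also available.
print(grid_search.best_score_)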
| bsd-3-clause |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/sandbox/examples/ex_gam_results.py | 37 | 1660 | # -*- coding: utf-8 -*-
"""Example results for GAM from tests
Created on Mon Nov 07 13:13:15 2011
Author: Josef Perktold
The example is loaded from a test module. The test still fails but the
results look relatively good.
I don't know yet why there is the small difference and why GAM doesn't
converge in this case
"""
from statsmodels.sandbox.tests.test_gam import _estGAMGaussianLogLink
tt = _estGAMGaussianLogLink()
comp, const = tt.res_gam.smoothed_demeaned(tt.mod_gam.exog)
comp_glm_ = tt.res2.model.exog * tt.res2.params
comp1 = comp_glm_[:,1:4].sum(1)
mean1 = comp1.mean()
comp1 -= mean1
comp2 = comp_glm_[:,4:].sum(1)
mean2 = comp2.mean()
comp2 -= mean2
comp1_true = tt.res2.model.exog[:,1:4].sum(1)
mean1 = comp1_true.mean()
comp1_true -= mean1
comp2_true = tt.res2.model.exog[:,4:].sum(1)
mean2 = comp2_true.mean()
comp2_true -= mean2
noise = tt.res2.model.endog - tt.mu_true
noise_eta = tt.family.link(tt.res2.model.endog) - tt.y_true
import matplotlib.pyplot as plt
plt.figure()
plt.plot(noise, 'k.')
plt.figure()
plt.plot(comp, 'r-')
plt.plot(comp1, 'b-')
plt.plot(comp2, 'b-')
plt.plot(comp1_true, 'k--', lw=2)
plt.plot(comp2_true, 'k--', lw=2)
#the next doesn't make sense - non-linear
#c1 = tt.family.link(tt.family.link.inverse(comp1_true) + noise)
#c2 = tt.family.link(tt.family.link.inverse(comp2_true) + noise)
#not nice in example/plot: noise variance is constant not proportional
plt.plot(comp1_true + noise_eta, 'g.', alpha=0.95)
plt.plot(comp2_true + noise_eta, 'r.', alpha=0.95)
#plt.plot(c1, 'g.', alpha=0.95)
#plt.plot(c2, 'r.', alpha=0.95)
plt.title('Gaussian loglink, GAM (red), GLM (blue), true (black)')
plt.show()
| apache-2.0 |
jakobworldpeace/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 28 | 18031 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_greater
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_oneclass_adaboost_proba():
# Test predict_proba robustness for one class label input.
# In response to issue #7501
# https://github.com/scikit-learn/scikit-learn/issues/7501
y_t = np.ones(len(X))
clf = AdaBoostClassifier().fit(X, y_t)
assert_array_equal(clf.predict_proba(X), np.ones((len(X), 1)))
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Check we used multiple estimators
assert_greater(len(clf.estimators_), 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in clf.estimators_)),
len(clf.estimators_))
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
reg = AdaBoostRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
assert score > 0.85
# Check we used multiple estimators
assert_true(len(reg.estimators_) > 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in reg.estimators_)),
len(reg.estimators_))
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
oscarbranson/tools | tools/chemistry.py | 1 | 3869 | """
The periodic table, and all it's info! And functions for doing chemical things.
"""
import os
import re
import pickle
import pandas as pd
def elements(all_isotopes=True):
"""
Loads a DataFrame of all elements and isotopes.
Scraped from https://www.webelements.com/
Returns
-------
    pandas DataFrame with columns (element, atomic_number, isotope,
    atomic_weight, percent). If all_isotopes is False, a pandas Series of
    abundance-weighted mean atomic weights indexed by element is returned
    instead.
"""
el = pd.read_pickle(os.path.dirname(__file__) + '/periodic_table/elements.pkl')
if all_isotopes:
return el
else:
def wmean(g):
return (g.atomic_weight * g.percent).sum() / 100
iel = el.groupby('element').apply(wmean)
iel.name = 'atomic_weight'
return iel
def periodic_table():
"""
Loads dict containing all elements and associated metadata.
Scraped from https://www.webelements.com/
Returns
-------
dict
"""
with open(os.path.dirname(__file__) + '/periodic_table/periodic_table.pkl', 'rb') as f:
return pickle.load(f)
def decompose_molecule(molecule, n=1):
"""
Returns the chemical constituents of the molecule, and their number.
Parameters
----------
molecule : str
A molecule in standard chemical notation,
e.g. 'CO2', 'HCO3' or 'B(OH)4'.
Returns
-------
All elements in molecule with their associated counts : dict
"""
if isinstance(n, str):
n = int(n)
# define regexs
parens = re.compile('\(([A-z0-9()]+)\)([0-9]+)?')
stoich = re.compile('([A-Z][a-z]?)([0-9]+)?')
ps = parens.findall(molecule) # find subgroups in parentheses
rem = parens.sub('', molecule) # get remainder
if len(ps) > 0:
for s, ns in ps:
comp = decompose_molecule(s, ns)
for k, v in comp.items():
comp[k] = v * n
else:
comp = {}
for e, ns in stoich.findall(rem):
if e not in comp:
comp[e] = 0
if ns == '':
ns = 1 * n
else:
ns = int(ns) * n
comp[e] += ns
return comp
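# Hedged illustration (added, not in the original module): parenthesised
# groups are expanded recursively, e.g.
#     decompose_molecule('B(OH)4')  ->  {'B': 1, 'O': 4, 'H': 4}
#     decompose_molecule('CaCO3')   ->  {'Ca': 1, 'C': 1, 'O': 3}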
def calc_M(molecule):
"""
Returns molecular weight of molecule.
Parameters
----------
molecule : str
A molecule in standard chemical notation,
e.g. 'CO2', 'HCO3' or 'B(OH)4'.
Returns
-------
Molecular weight of molecule : dict
"""
# load periodic table
els = elements(all_isotopes=False)
comp = decompose_molecule(molecule)
m = 0
for k, v in comp.items():
m += els[k] * v
return m
def seawater(Sal=35., unit='mol/kg'):
"""
Standard mean composition of seawater.
From Dickson, Sabine and Christian (2007), Chapter 5, Table 3
@book{dickson2007guide,
title={Guide to best practices for ocean CO2 measurements.},
author={Dickson, Andrew Gilmore and Sabine, Christopher L and Christian, James Robert},
year={2007},
publisher={North Pacific Marine Science Organization},
howpublished="https://www.nodc.noaa.gov/ocads/oceans/Handbook_2007.html",
ISBN="1-897176-07-4"}
Parameters
----------
Sal : float
Salinity, default is 35
unit : str
Either 'mol/kg' or 'g/kg'.
Returns
-------
Seawater composition in chosen units at specified salinity : dict
"""
sw = {"Cl": 0.54586,
"SO4": 0.02824,
"Br": 0.00084,
"F": 0.00007,
"Na": 0.46906,
"Mg": 0.05282,
"Ca": 0.01028,
"K": 0.01021,
"Sr": 0.00009,
"B": 0.00042}
for s in sw.keys():
sw[s] *= Sal / 35.
if unit == 'g/kg':
for k, v in sw.items():
sw[k] = calc_M(k) * v
return sw
if __name__ == '__main__':
print()
    print(calc_M('B(OH)3'))
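    # Hedged extra demos (added for illustration; exact printed values
    # depend on the bundled atomic-weight table).
    print(calc_M('CaCO3'))                         # roughly 100.09
    print(seawater(Sal=35., unit='g/kg')['Mg'])    # Mg in g/kg at S=35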
| gpl-3.0 |
xguse/scikit-bio | setup.py | 1 | 4813 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import platform
import re
import ast
from setuptools import find_packages, setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext as _build_ext
# Bootstrap setup.py with numpy
# Huge thanks to coldfix's solution
# http://stackoverflow.com/a/21621689/579416
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('skbio/__init__.py', 'rb') as f:
hit = _version_re.search(f.read().decode('utf-8')).group(1)
version = str(ast.literal_eval(hit))
classes = """
Development Status :: 4 - Beta
License :: OSI Approved :: BSD License
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = ('Data structures, algorithms and educational '
'resources for bioinformatics.')
with open('README.rst') as f:
long_description = f.read()
# Dealing with Cython
USE_CYTHON = os.environ.get('USE_CYTHON', False)
ext = '.pyx' if USE_CYTHON else '.c'
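# For illustration (a hedged note, not from the original setup script): a
# development build straight from the Cython sources would typically look
# something like
#     USE_CYTHON=1 python setup.py build_ext --inplace
# whereas the default path compiles the pre-generated C files.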
# There's a bug in some versions of Python 3.4 that propagates
# -Werror=declaration-after-statement to extensions, instead of just affecting
# the compilation of the interpreter. See http://bugs.python.org/issue21121 for
# details. This acts as a workaround until the next Python 3 release -- thanks
# Wolfgang Maier (wolma) for the workaround!
ssw_extra_compile_args = ['-Wno-error=declaration-after-statement']
# Users with i686 architectures have reported that adding this flag allows
# SSW to be compiled. See https://github.com/biocore/scikit-bio/issues/409 and
# http://stackoverflow.com/q/26211814/3776794 for details.
if platform.machine() == 'i686':
ssw_extra_compile_args.append('-msse2')
extensions = [
Extension("skbio.stats.__subsample",
["skbio/stats/__subsample" + ext]),
Extension("skbio.alignment._ssw_wrapper",
["skbio/alignment/_ssw_wrapper" + ext,
"skbio/alignment/_lib/ssw.c"],
extra_compile_args=ssw_extra_compile_args)
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions)
setup(name='scikit-bio',
version=version,
license='BSD',
description=description,
long_description=long_description,
author="scikit-bio development team",
author_email="[email protected]",
maintainer="scikit-bio development team",
maintainer_email="[email protected]",
url='http://scikit-bio.org',
test_suite='nose.collector',
packages=find_packages(),
ext_modules=extensions,
cmdclass={'build_ext': build_ext},
setup_requires=['numpy >= 1.9.2'],
install_requires=[
'bz2file >= 0.98',
'CacheControl[FileCache] >= 0.11.5',
'contextlib2 >= 0.4.0',
'decorator >= 3.4.2',
'future >= 0.14.3',
'IPython >= 3.2.0',
'matplotlib >= 1.4.3',
'natsort >= 4.0.3',
'numpy >= 1.9.2',
'pandas >= 0.16.2',
'scipy >= 0.15.1',
'six >= 1.9.0'
],
extras_require={'test': ["HTTPretty", "nose", "pep8", "flake8",
"python-dateutil", "check-manifest"],
'doc': ["Sphinx == 1.2.2", "sphinx-bootstrap-theme"]},
classifiers=classifiers,
package_data={
'skbio.io.tests': ['data/*'],
'skbio.io.format.tests': ['data/*'],
'skbio.stats.tests': ['data/*'],
'skbio.stats.distance.tests': ['data/*'],
'skbio.stats.ordination.tests': ['data/*']
}
)
| bsd-3-clause |
MostafaGazar/tensorflow | tensorflow/contrib/factorization/python/ops/gmm.py | 6 | 7521 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering.
This goes on top of skflow API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators._sklearn import TransformerMixin
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
class GMM(estimator.Estimator, TransformerMixin):
"""GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
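  # A hedged usage sketch (illustrative only; the data and parameters below
  # are made up, not taken from TensorFlow's documentation):
  #
  #   points = np.random.randn(1000, 2).astype(np.float32)
  #   gmm = GMM(num_clusters=3, steps=20, batch_size=256)
  #   gmm.fit(points)
  #   assignments = gmm.predict(points)
  #   centers = gmm.clusters()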
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
batch_size=128,
steps=10,
continue_training=False,
config=None,
verbose=1):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
batch_size: See TensorFlowEstimator
steps: See TensorFlowEstimator
continue_training: See TensorFlowEstimator
config: See TensorFlowEstimator
verbose: See TensorFlowEstimator
"""
super(GMM, self).__init__(
model_dir=model_dir,
config=config)
self.batch_size = batch_size
self.steps = steps
self.continue_training = continue_training
self.verbose = verbose
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
def fit(self, x, y=None, monitors=None, logdir=None, steps=None):
"""Trains a GMM clustering on x.
Note: See TensorFlowEstimator for logic for continuous training and graph
construction across multiple calls to fit.
Args:
x: training input matrix of shape [n_samples, n_features].
y: labels. Should be None.
monitors: List of `Monitor` objects to print training progress and
invoke early stopping.
logdir: the directory to save the log file that can be used for optional
visualization.
steps: number of training steps. If not None, overrides the value passed
in constructor.
Returns:
Returns self.
"""
if logdir is not None:
self._model_dir = logdir
self._data_feeder = data_feeder.setup_train_data_feeder(
x, None, self._num_clusters, self.batch_size)
self._train_model(input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps or self.steps,
monitors=monitors,
init_feed_fn=self._data_feeder.get_feed_dict_fn())
return self
def predict(self, x, batch_size=None):
"""Predict cluster id for each element in x.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, containing cluster ids.
"""
return np.array([
prediction[GMM.ASSIGNMENTS] for prediction in
super(GMM, self).predict(x=x, batch_size=batch_size, as_iterable=True)])
def score(self, x, batch_size=None):
"""Predict total sum of distances to nearest clusters.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Total score.
"""
return np.sum(self.evaluate(x=x, batch_size=batch_size)[GMM.SCORES])
def transform(self, x, batch_size=None):
"""Transforms each element in x to distances to cluster centers.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
return np.array([
prediction[GMM.ALL_SCORES] for prediction in
super(GMM, self).predict(x=x, batch_size=batch_size, as_iterable=True)])
def clusters(self):
"""Returns cluster centers."""
clusters = tf.contrib.framework.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return tf.contrib.framework.load_variable(
self.model_dir,
gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat(1, [features[k] for k in sorted(features.keys())])
return features
def _get_train_ops(self, features, _):
(_,
_,
losses,
training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
incr_step = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
loss = tf.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
return training_op, loss
def _get_predict_ops(self, features):
(all_scores,
model_predictions,
_,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
def _get_eval_ops(self, features, _, unused_metrics):
(_,
_,
losses,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {
GMM.SCORES: tf.reduce_sum(losses),
}
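# Minimal usage sketch (illustrative only; the data shape and parameter values
# below are assumptions, not part of this module):
#
#   import numpy as np
#   x = np.random.randn(500, 2).astype(np.float32)
#   gmm = GMM(num_clusters=3, steps=20, batch_size=128)
#   gmm.fit(x)                   # runs `steps` rounds of GMM updates
#   ids = gmm.predict(x)         # hard cluster id per row of x
#   centers = gmm.clusters()     # array of shape (num_clusters, n_features)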
| apache-2.0 |
saketkc/statsmodels | statsmodels/tsa/tests/test_ar.py | 19 | 12703 | """
Test AR Model
"""
import statsmodels.api as sm
from statsmodels.compat.python import range
from statsmodels.tsa.ar_model import AR
from numpy.testing import (assert_almost_equal, assert_allclose, assert_)
from statsmodels.tools.testing import assert_equal
from .results import results_ar
import numpy as np
import numpy.testing as npt
from pandas import Series, Index, TimeSeries, DatetimeIndex
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
class CheckARMixin(object):
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_6)
def test_bse(self):
bse = np.sqrt(np.diag(self.res1.cov_params())) # no dof correction
        # for compatibility with Stata
assert_almost_equal(bse, self.res2.bse_stata, DECIMAL_6)
assert_almost_equal(self.res1.bse, self.res2.bse_gretl, DECIMAL_5)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_6)
def test_fpe(self):
assert_almost_equal(self.res1.fpe, self.res2.fpe, DECIMAL_6)
def test_pickle(self):
from statsmodels.compat.python import BytesIO
fh = BytesIO()
#test wrapped results load save pickle
self.res1.save(fh)
fh.seek(0,0)
res_unpickled = self.res1.__class__.load(fh)
assert_(type(res_unpickled) is type(self.res1))
class TestAROLSConstant(CheckARMixin):
"""
Test AR fit by OLS with a constant.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.sunspots.load()
cls.res1 = AR(data.endog).fit(maxlag=9, method='cmle')
cls.res2 = results_ar.ARResultsOLS(constant=True)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start0,
DECIMAL_4)
assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start9,
DECIMAL_4)
assert_almost_equal(model.predict(params, start=100),
self.res2.FVOLSnneg1start100, DECIMAL_4)
assert_almost_equal(model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0, DECIMAL_4)
assert_almost_equal(model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200, DECIMAL_4)
#assert_almost_equal(model.predict(params, n=200,start=-109),
# self.res2.FVOLSn200startneg109, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308, end=424),
self.res2.FVOLSn100start325, DECIMAL_4)
assert_almost_equal(model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9, DECIMAL_4)
assert_almost_equal(model.predict(params),
self.res2.FVOLSdefault, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312, DECIMAL_4)
class TestAROLSNoConstant(CheckARMixin):
"""f
Test AR fit by OLS without a constant.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.sunspots.load()
cls.res1 = AR(data.endog).fit(maxlag=9,method='cmle',trend='nc')
cls.res2 = results_ar.ARResultsOLS(constant=False)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start0,
DECIMAL_4)
assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start9,
DECIMAL_4)
assert_almost_equal(model.predict(params, start=100),
self.res2.FVOLSnneg1start100, DECIMAL_4)
assert_almost_equal(model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0, DECIMAL_4)
assert_almost_equal(model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200, DECIMAL_4)
#assert_almost_equal(model.predict(params, n=200,start=-109),
# self.res2.FVOLSn200startneg109, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308,end=424),
self.res2.FVOLSn100start325, DECIMAL_4)
assert_almost_equal(model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9, DECIMAL_4)
assert_almost_equal(model.predict(params),
self.res2.FVOLSdefault, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312, DECIMAL_4)
#class TestARMLEConstant(CheckAR):
class TestARMLEConstant(object):
@classmethod
def setupClass(cls):
data = sm.datasets.sunspots.load()
cls.res1 = AR(data.endog).fit(maxlag=9,method="mle", disp=-1)
cls.res2 = results_ar.ARResultsMLE(constant=True)
def test_predict(self):
model = self.res1.model
# for some reason convergence is off in 1 out of 10 runs on
# some platforms. i've never been able to replicate. see #910
params = np.array([ 5.66817602, 1.16071069, -0.39538222,
-0.16634055, 0.15044614, -0.09439266,
0.00906289, 0.05205291, -0.08584362,
0.25239198])
assert_almost_equal(model.predict(params), self.res2.FVMLEdefault,
DECIMAL_4)
assert_almost_equal(model.predict(params, start=9, end=308),
self.res2.FVMLEstart9end308, DECIMAL_4)
assert_almost_equal(model.predict(params, start=100, end=308),
self.res2.FVMLEstart100end308, DECIMAL_4)
assert_almost_equal(model.predict(params, start=0, end=200),
self.res2.FVMLEstart0end200, DECIMAL_4)
# Note: factor 0.5 in below two tests needed to meet precision on OS X.
assert_almost_equal(0.5 * model.predict(params, start=200, end=333),
0.5 * self.res2.FVMLEstart200end334, DECIMAL_4)
assert_almost_equal(0.5 * model.predict(params, start=308, end=333),
0.5 * self.res2.FVMLEstart308end334, DECIMAL_4)
assert_almost_equal(model.predict(params, start=9,end=309),
self.res2.FVMLEstart9end309, DECIMAL_4)
assert_almost_equal(model.predict(params, end=301),
self.res2.FVMLEstart0end301, DECIMAL_4)
assert_almost_equal(model.predict(params, start=4, end=312),
self.res2.FVMLEstart4end312, DECIMAL_4)
assert_almost_equal(model.predict(params, start=2, end=7),
self.res2.FVMLEstart2end7, DECIMAL_4)
def test_dynamic_predict(self):
# for some reason convergence is off in 1 out of 10 runs on
# some platforms. i've never been able to replicate. see #910
params = np.array([ 5.66817602, 1.16071069, -0.39538222,
-0.16634055, 0.15044614, -0.09439266,
0.00906289, 0.05205291, -0.08584362,
0.25239198])
res1 = self.res1
res2 = self.res2
rtol = 8e-6
# assert_raises pre-sample
# 9, 51
start, end = 9, 51
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn[start:end+1], rtol=rtol)
# 9, 308
start, end = 9, 308
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn[start:end+1], rtol=rtol)
# 9, 333
start, end = 9, 333
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn[start:end+1], rtol=rtol)
# 100, 151
start, end = 100, 151
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn2[start:end+1], rtol=rtol)
# 100, 308
start, end = 100, 308
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn2[start:end+1], rtol=rtol)
# 100, 333
start, end = 100, 333
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn2[start:end+1], rtol=rtol)
# 308, 308
start, end = 308, 308
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn3[start:end+1], rtol=rtol)
# 308, 333
start, end = 308, 333
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn3[start:end+1], rtol=rtol)
# 309, 333
start, end = 309, 333
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn4[start:end+1], rtol=rtol)
# None, None
start, end = None, None
fv = res1.model.predict(params, dynamic=True)
assert_allclose(fv, res2.fcdyn[9:309], rtol=rtol)
class TestAutolagAR(object):
@classmethod
def setupClass(cls):
data = sm.datasets.sunspots.load()
endog = data.endog
results = []
for lag in range(1,16+1):
endog_tmp = endog[16-lag:]
r = AR(endog_tmp).fit(maxlag=lag)
# See issue #324 for why we're doing these corrections vs. R
# results
k_ar = r.k_ar
k_trend = r.k_trend
log_sigma2 = np.log(r.sigma2)
aic = r.aic
aic = (aic - log_sigma2) * (1 + k_ar)/(1 + k_ar + k_trend)
aic += log_sigma2
hqic = r.hqic
hqic = (hqic - log_sigma2) * (1 + k_ar)/(1 + k_ar + k_trend)
hqic += log_sigma2
bic = r.bic
bic = (bic - log_sigma2) * (1 + k_ar)/(1 + k_ar + k_trend)
bic += log_sigma2
results.append([aic, hqic, bic, r.fpe])
res1 = np.asarray(results).T.reshape(4,-1, order='C')
# aic correction to match R
cls.res1 = res1
cls.res2 = results_ar.ARLagResults("const").ic
def test_ic(self):
npt.assert_almost_equal(self.res1, self.res2, DECIMAL_6)
def test_ar_dates():
# just make sure they work
data = sm.datasets.sunspots.load()
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
endog = Series(data.endog, index=dates)
ar_model = sm.tsa.AR(endog, freq='A').fit(maxlag=9, method='mle', disp=-1)
pred = ar_model.predict(start='2005', end='2015')
predict_dates = sm.tsa.datetools.dates_from_range('2005', '2015')
from pandas import DatetimeIndex # pylint: disable-msg=E0611
predict_dates = DatetimeIndex(predict_dates, freq='infer')
assert_equal(ar_model.data.predict_dates, predict_dates)
assert_equal(pred.index, predict_dates)
def test_ar_named_series():
dates = sm.tsa.datetools.dates_from_range("2011m1", length=72)
y = Series(np.random.randn(72), name="foobar", index=dates)
results = sm.tsa.AR(y).fit(2)
assert_(results.params.index.equals(Index(["const", "L1.foobar",
"L2.foobar"])))
def test_ar_start_params():
# fix 236
# smoke test
data = sm.datasets.sunspots.load()
res = AR(data.endog).fit(maxlag=9, start_params=0.1*np.ones(10),
method="mle", disp=-1, maxiter=100)
def test_ar_series():
# smoke test for 773
dta = sm.datasets.macrodata.load_pandas().data["cpi"].diff().dropna()
dates = sm.tsa.datetools.dates_from_range("1959Q1", length=len(dta))
dta.index = dates
ar = AR(dta).fit(maxlags=15)
ar.bse
def test_ar_select_order():
# 2118
np.random.seed(12345)
y = sm.tsa.arma_generate_sample([1, -.75, .3], [1], 100)
ts = TimeSeries(y, index=DatetimeIndex(start='1/1/1990', periods=100,
freq='M'))
ar = AR(ts)
res = ar.select_order(maxlag=12, ic='aic')
assert_(res == 2)
#TODO: likelihood for ARX model?
#class TestAutolagARX(object):
# def setup(self):
# data = sm.datasets.macrodata.load()
# endog = data.data.realgdp
# exog = data.data.realint
# results = []
# for lag in range(1, 26):
# endog_tmp = endog[26-lag:]
# exog_tmp = exog[26-lag:]
# r = AR(endog_tmp, exog_tmp).fit(maxlag=lag, trend='ct')
# results.append([r.aic, r.hqic, r.bic, r.fpe])
# self.res1 = np.asarray(results).T.reshape(4,-1, order='C')
| bsd-3-clause |
pprett/statsmodels | statsmodels/sandbox/infotheo.py | 3 | 16324 | """
Information Theoretic and Entropy Measures
References
----------
Golan, As. 2008. "Information and Entropy Econometrics -- A Review and
Synthesis." Foundations And Trends in Econometrics 2(1-2), 1-145.
Golan, A., Judge, G., and Miller, D. 1996. Maximum Entropy Econometrics.
Wiley & Sons, Chichester.
"""
#For MillerMadow correction
#Miller, G. 1955. Note on the bias of information estimates. Info. Theory
# Psychol. Prob. Methods II-B:95-100.
#For ChaoShen method
#Chao, A., and T.-J. Shen. 2003. Nonparametric estimation of Shannon's index of diversity when
#there are unseen species in sample. Environ. Ecol. Stat. 10:429-443.
#Good, I. J. 1953. The population frequencies of species and the estimation of population parameters.
#Biometrika 40:237-264.
#Horvitz, D.G., and D. J. Thompson. 1952. A generalization of sampling without replacement from a finute universe. J. Am. Stat. Assoc. 47:663-685.
#For NSB method
#Nemenman, I., F. Shafee, and W. Bialek. 2002. Entropy and inference, revisited. In: Dietterich, T.,
#S. Becker, Z. Gharamani, eds. Advances in Neural Information Processing Systems 14: 471-478.
#Cambridge (Massachusetts): MIT Press.
#For shrinkage method
#Dougherty, J., Kohavi, R., and Sahami, M. (1995). Supervised and unsupervised discretization of
#continuous features. In International Conference on Machine Learning.
#Yang, Y. and Webb, G. I. (2003). Discretization for naive-bayes learning: managing discretization
#bias and variance. Technical Report 2003/131 School of Computer Science and Software Engineer-
#ing, Monash University.
from scipy import maxentropy, stats
import numpy as np
from matplotlib import pyplot as plt
#TODO: change these to use maxentutils so that over/underflow is handled
#with the logsumexp.
from scipy.maxentropy import logsumexp as lse
def logsumexp(a, axis=None):
"""
Compute the log of the sum of exponentials log(e^{a_1}+...e^{a_n}) of a
Avoids numerical overflow.
Parameters
----------
a : array-like
The vector to exponentiate and sum
axis : int, optional
The axis along which to apply the operation. Defaults is None.
Returns
-------
    log(sum(exp(a)))
Notes
-----
This function was taken from the mailing list
http://mail.scipy.org/pipermail/scipy-user/2009-October/022931.html
    This should be superseded by the ufunc when it is finished.
"""
    if axis is None:
        # Use the scipy.maxentropy version.
        return lse(a)
    a = np.asarray(a)
    shp = list(a.shape)
    shp[axis] = 1
    a_max = a.max(axis=axis)
    s = np.log(np.exp(a - a_max.reshape(shp)).sum(axis=axis))
    return a_max + s
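# Example (hand-checked; assumes the scipy.maxentropy import above is
# available, as in the rest of this module):
#   >>> round(logsumexp(np.array([0.0, 0.0])), 4)   # log(e**0 + e**0) = log(2)
#   0.6931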
def _isproperdist(X):
"""
Checks to see if `X` is a proper probability distribution
"""
X = np.asarray(X)
if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1):
return False
else:
return True
def discretize(X, method="ef", nbins=None):
"""
Discretize `X`
Parameters
----------
    nbins : int, optional
Number of bins. Default is floor(sqrt(N))
method : string
"ef" is equal-frequency binning
"ew" is equal-width binning
Examples
--------
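    A rough sketch of equal-frequency binning (illustrative, not a verified
    doctest; the input data below is assumed):
    >>> x = np.random.rand(100)
    >>> d = discretize(x, method="ef", nbins=4)
    >>> sorted(np.unique(d))    # bins labelled 1.0 through 4.0
    [1.0, 2.0, 3.0, 4.0]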
"""
nobs = len(X)
if nbins == None:
nbins = np.floor(np.sqrt(nobs))
if method == "ef":
discrete = np.ceil(nbins * stats.rankdata(X)/nobs)
if method == "ew":
width = np.max(X) - np.min(X)
width = np.floor(width/nbins)
svec, ivec = stats.fastsort(X)
discrete = np.zeros(nobs)
binnum = 1
base = svec[0]
discrete[ivec[0]] = binnum
for i in xrange(1,nobs):
if svec[i] < base + width:
discrete[ivec[i]] = binnum
else:
base = svec[i]
binnum += 1
discrete[ivec[i]] = binnum
return discrete
#TODO: looks okay but needs more robust tests for corner cases
def logbasechange(a,b):
"""
There is a one-to-one transformation of the entropy value from
a log base b to a log base a :
H_{b}(X)=log_{b}(a)[H_{a}(X)]
Returns
-------
log_{b}(a)
"""
return np.log(b)/np.log(a)
def natstobits(X):
"""
Converts from nats to bits
"""
return logbasechange(np.e, 2) * X
def bitstonats(X):
"""
Converts from bits to nats
"""
return logbasechange(2, np.e) * X
#TODO: make this entropy, and then have different measures as
#a method
def shannonentropy(px, logbase=2):
"""
This is Shannon's entropy
Parameters
-----------
logbase, int or np.e
The base of the log
px : 1d or 2d array_like
Can be a discrete probability distribution, a 2d joint distribution,
or a sequence of probabilities.
    Returns
    -------
    For log base 2 (bits) given a discrete distribution
        H(p) = sum(px * log2(1/px)) = -sum(px * log2(px)) = E[log2(1/p(X))]
    For log base 2 (bits) given a joint distribution
        H(px,py) = -sum_{k,j} w_{kj} * log2(w_{kj})
Notes
-----
shannonentropy(0) is defined as 0
"""
#TODO: haven't defined the px,py case?
px = np.asarray(px)
if not np.all(px <= 1) or not np.all(px >= 0):
raise ValueError, "px does not define proper distribution"
entropy = -np.sum(np.nan_to_num(px*np.log2(px)))
if logbase != 2:
return logbasechange(2,logbase) * entropy
else:
return entropy
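# Worked example (hand-checked): a fair coin carries exactly one bit, since
# H = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.
#   >>> shannonentropy([0.5, 0.5])
#   1.0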
# Shannon's information content
def shannoninfo(px, logbase=2):
"""
Shannon's information
Parameters
----------
px : float or array-like
`px` is a discrete probability distribution
Returns
-------
For logbase = 2
    -np.log2(px)
"""
px = np.asarray(px)
if not np.all(px <= 1) or not np.all(px >= 0):
raise ValueError, "px does not define proper distribution"
if logbase != 2:
return - logbasechange(2,logbase) * np.log2(px)
else:
return - np.log2(px)
def condentropy(px, py, pxpy=None, logbase=2):
"""
Return the conditional entropy of X given Y.
Parameters
----------
px : array-like
py : array-like
pxpy : array-like, optional
If pxpy is None, the distributions are assumed to be independent
        and condentropy(px,py) = shannonentropy(px)
logbase : int or np.e
Returns
-------
    sum_{kj} w_{kj} * log(q_{j}/w_{kj})
        where q_{j} = Y[j]
        and w_{kj} = X[k,j]
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError, "px or py is not a proper probability distribution"
if pxpy != None and not _isproperdist(pxpy):
raise ValueError, "pxpy is not a proper joint distribtion"
if pxpy == None:
pxpy = np.outer(py,px)
condent = np.sum(pxpy * np.nan_to_num(np.log2(py/pxpy)))
if logbase == 2:
return condent
else:
return logbasechange(2, logbase) * condent
def mutualinfo(px,py,pxpy, logbase=2):
"""
Returns the mutual information between X and Y.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like
The joint probability distribution of random variables X and Y.
Note that if X and Y are independent then the mutual information
is zero.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
shannonentropy(px) - condentropy(px,py,pxpy)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError, "px or py is not a proper probability distribution"
if pxpy != None and not _isproperdist(pxpy):
raise ValueError, "pxpy is not a proper joint distribtion"
if pxpy == None:
pxpy = np.outer(py,px)
return shannonentropy(px, logbase=logbase) - condentropy(px,py,pxpy,
logbase=logbase)
def corrent(px,py,pxpy,logbase=2):
"""
An information theoretic correlation measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,logbase=logbase)
Notes
-----
This is also equivalent to
corrent(px,py,pxpy) = 1 - condent(px,py,pxpy)/shannonentropy(py)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError, "px or py is not a proper probability distribution"
if pxpy != None and not _isproperdist(pxpy):
raise ValueError, "pxpy is not a proper joint distribtion"
if pxpy == None:
pxpy = np.outer(py,px)
return mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,
logbase=logbase)
def covent(px,py,pxpy,logbase=2):
"""
An information theoretic covariance measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
    condentropy(px,py,pxpy,logbase=logbase) + condentropy(py,px,pxpy,
        logbase=logbase)
Notes
-----
This is also equivalent to
    covent(px,py,pxpy) = condentropy(px,py,pxpy) + condentropy(py,px,pxpy)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError, "px or py is not a proper probability distribution"
if pxpy != None and not _isproperdist(pxpy):
raise ValueError, "pxpy is not a proper joint distribtion"
if pxpy == None:
pxpy = np.outer(py,px)
    return condentropy(px,py,pxpy,logbase=logbase) + condentropy(py,px,pxpy,
            logbase=logbase)
#### Generalized Entropies ####
def renyientropy(px,alpha=1,logbase=2,measure='R'):
"""
Renyi's generalized entropy
Parameters
----------
px : array-like
Discrete probability distribution of random variable X. Note that
px is assumed to be a proper probability distribution.
logbase : int or np.e, optional
Default is 2 (bits)
alpha : float or inf
The order of the entropy. The default is 1, which in the limit
is just Shannon's entropy. 2 is Renyi (Collision) entropy. If
the string "inf" or numpy.inf is specified the min-entropy is returned.
measure : str, optional
The type of entropy measure desired. 'R' returns Renyi entropy
measure. 'T' returns the Tsallis entropy measure.
Returns
-------
1/(1-alpha)*log(sum(px**alpha))
In the limit as alpha -> 1, Shannon's entropy is returned.
In the limit as alpha -> inf, min-entropy is returned.
"""
#TODO:finish returns
#TODO:add checks for measure
if not _isproperdist(px):
raise ValueError, "px is not a proper probability distribution"
alpha = float(alpha)
if alpha == 1:
genent = shannonentropy(px)
if logbase != 2:
return logbasechange(2, logbase) * genent
return genent
    elif 'inf' in str(alpha).lower() or alpha == np.inf:
return -np.log(np.max(px))
# gets here if alpha != (1 or inf)
px = px**alpha
genent = np.log(px.sum())
if logbase == 2:
return 1/(1-alpha) * genent
else:
return 1/(1-alpha) * logbasechange(2, logbase) * genent
#TODO: before completing this, need to rethink the organization of
# (relative) entropy measures, ie., all put into one function
# and have kwdargs, etc.?
def gencrossentropy(px,py,pxpy,alpha=1,logbase=2, measure='T'):
"""
Generalized cross-entropy measures.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
measure : str, optional
The measure is the type of generalized cross-entropy desired. 'T' is
the cross-entropy version of the Tsallis measure. 'CR' is Cressie-Read
measure.
"""
if __name__ == "__main__":
print "From Golan (2008) \"Information and Entropy Econometrics -- A Review \
and Synthesis"
print "Table 3.1"
# Examples from Golan (2008)
X = [.2,.2,.2,.2,.2]
Y = [.322,.072,.511,.091,.004]
for i in X:
print shannoninfo(i)
for i in Y:
print shannoninfo(i)
print shannonentropy(X)
print shannonentropy(Y)
p = [1e-5,1e-4,.001,.01,.1,.15,.2,.25,.3,.35,.4,.45,.5]
plt.subplot(111)
plt.ylabel("Information")
plt.xlabel("Probability")
x = np.linspace(0,1,100001)
plt.plot(x, shannoninfo(x))
# plt.show()
plt.subplot(111)
plt.ylabel("Entropy")
plt.xlabel("Probability")
x = np.linspace(0,1,101)
plt.plot(x, map(shannonentropy, zip(x,1-x)))
# plt.show()
# define a joint probability distribution
# from Golan (2008) table 3.3
w = np.array([[0,0,1./3],[1/9.,1/9.,1/9.],[1/18.,1/9.,1/6.]])
# table 3.4
px = w.sum(0)
py = w.sum(1)
H_X = shannonentropy(px)
H_Y = shannonentropy(py)
H_XY = shannonentropy(w)
H_XgivenY = condentropy(px,py,w)
H_YgivenX = condentropy(py,px,w)
# note that cross-entropy is not a distance measure as the following shows
D_YX = logbasechange(2,np.e)*stats.entropy(px, py)
D_XY = logbasechange(2,np.e)*stats.entropy(py, px)
I_XY = mutualinfo(px,py,w)
print "Table 3.3"
print H_X,H_Y, H_XY, H_XgivenY, H_YgivenX, D_YX, D_XY, I_XY
print "discretize functions"
X=np.array([21.2,44.5,31.0,19.5,40.6,38.7,11.1,15.8,31.9,25.8,20.2,14.2,
24.0,21.0,11.3,18.0,16.3,22.2,7.8,27.8,16.3,35.1,14.9,17.1,28.2,16.4,
16.5,46.0,9.5,18.8,32.1,26.1,16.1,7.3,21.4,20.0,29.3,14.9,8.3,22.5,
12.8,26.9,25.5,22.9,11.2,20.7,26.2,9.3,10.8,15.6])
discX = discretize(X)
#CF: R's infotheo
#TODO: compare to pyentropy quantize?
print
print "Example in section 3.6 of Golan, using table 3.3"
print "Bounding errors using Fano's inequality"
print "H(P_{e}) + P_{e}log(K-1) >= H(X|Y)"
print "or, a weaker inequality"
print "P_{e} >= [H(X|Y) - 1]/log(K)"
print "P(x) = %s" % px
print "X = 3 has the highest probability, so this is the estimate Xhat"
pe = 1 - px[2]
print "The probability of error Pe is 1 - p(X=3) = %0.4g" % pe
H_pe = shannonentropy([pe,1-pe])
print "H(Pe) = %0.4g and K=3" % H_pe
print "H(Pe) + Pe*log(K-1) = %0.4g >= H(X|Y) = %0.4g" % \
(H_pe+pe*np.log2(2), H_XgivenY)
print "or using the weaker inequality"
print "Pe = %0.4g >= [H(X) - 1]/log(K) = %0.4g" % (pe, (H_X - 1)/np.log2(3))
print "Consider now, table 3.5, where there is additional information"
print "The conditional probabilities of P(X|Y=y) are "
w2 = np.array([[0.,0.,1.],[1/3.,1/3.,1/3.],[1/6.,1/3.,1/2.]])
print w2
# not a proper distribution?
print "The probability of error given this information is"
print "Pe = [H(X|Y) -1]/log(K) = %0.4g" % ((np.mean([0,shannonentropy(w2[1]),shannonentropy(w2[2])])-1)/np.log2(3))
print "such that more information lowers the error"
### Stochastic processes
markovchain = np.array([[.553,.284,.163],[.465,.312,.223],[.420,.322,.258]])
| bsd-3-clause |
costypetrisor/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 4 | 26157 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
    # all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use pre-defined folds, as folds generated for a different y would differ
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight dict is
    # provided for a multiclass problem. However, it can
    # handle binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path give almost the same results. However since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
| bsd-3-clause |
stimpsonsg/moose | modules/tensor_mechanics/tests/capped_drucker_prager/small_deform2.py | 23 | 3933 | #!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(scheme, angle_degrees):
angle = angle_degrees * np.pi / 180.0
cohesion = 10
friction_degrees = 20
tip_smoother = 4
mean = -10
friction = friction_degrees * np.pi / 180.0
if (scheme == "native"):
coh = cohesion
fric = friction
elif (scheme == "outer_tip"):
coh = 2 * np.sqrt(3) * cohesion * np.cos(friction) / (3.0 - np.sin(friction))
fric = np.arctan(2 * np.sin(friction) / np.sqrt(3) / (3.0 - np.sin(friction)))
elif (scheme == "inner_tip"):
coh = 2 * np.sqrt(3) * cohesion * np.cos(friction) / (3.0 + np.sin(friction))
fric = np.arctan(2 * np.sin(friction) / np.sqrt(3) / (3.0 + np.sin(friction)))
elif (scheme == "lode_zero"):
coh = cohesion * np.cos(friction)
fric = np.arctan(np.sin(friction) / 3.0)
elif (scheme == "inner_edge"):
coh = 3 * cohesion * np.cos(friction) / np.sqrt(9.0 + 3.0 * np.power(np.sin(friction), 2))
fric = np.arctan(np.sin(friction) / np.sqrt(9.0 + 3.0 * np.power(np.sin(friction), 2)))
bar = np.sqrt(np.power(coh - mean * 3.0 * np.tan(fric), 2) - np.power(tip_smoother, 2))
x = bar * np.cos(angle)
y = bar * np.sin(angle)
return (x, y)
def sigma_mean(stress):
return (stress[0] + stress[3] + stress[5])/3.0
def sigma_bar(stress):
mean = sigma_mean(stress)
return np.sqrt(0.5 * (np.power(stress[0] - mean, 2) + 2*stress[1]*stress[1] + 2*stress[2]*stress[2] + np.power(stress[3] - mean, 2) + 2*stress[4]*stress[4] + np.power(stress[5] - mean, 2)))
def third_inv(stress):
mean = sigma_mean(stress)
return (stress[0] - mean)*(stress[3] - mean)*(stress[5] - mean)
def lode_angle(stress):
bar = sigma_bar(stress)
third = third_inv(stress)
return np.arcsin(-1.5 * np.sqrt(3.0) * third / np.power(bar, 3)) / 3.0
def moose_result(fn):
f = open(fn)
x = []
y = []
for line in f:
if not line.strip():
continue
line = line.strip()
if line.startswith("time") or line.startswith("0"):
continue
line = map(float, line.split(","))
if line[1] < -1E-10:
continue # this is an elastic deformation
bar = sigma_bar(line[4:])
lode = lode_angle(line[4:])
x.append(bar * np.cos(lode))
y.append(bar * np.sin(lode))
f.close()
return (x, y)
angles = np.arange(-30, 31, 1)
plt.figure()
plt.plot(expected("native", angles)[0], expected("native", angles)[1], 'k-', label = 'expected (native)')
mr = moose_result("gold/small_deform2_native.csv")
plt.plot(mr[0], mr[1], 'k^', label = 'MOOSE (native)')
plt.plot(expected("outer_tip", angles)[0], expected("outer_tip", angles)[1], 'g-', label = 'expected (outer_tip)')
mr = moose_result("gold/small_deform2_outer_tip.csv")
plt.plot(mr[0], mr[1], 'g^', label = 'MOOSE (outer_tip)')
plt.plot(expected("inner_tip", angles)[0], expected("inner_tip", angles)[1], 'b-', label = 'expected (inner_tip)')
mr = moose_result("gold/small_deform2_inner_tip.csv")
plt.plot(mr[0], mr[1], 'b^', label = 'MOOSE (inner_tip)')
plt.plot(expected("lode_zero", angles)[0], expected("lode_zero", angles)[1], 'c-', label = 'expected (lode_zero)')
mr = moose_result("gold/small_deform2_lode_zero.csv")
plt.plot(mr[0], mr[1], 'c^', label = 'MOOSE (lode_zero)')
plt.plot(expected("inner_edge", angles)[0], expected("inner_edge", angles)[1], 'r-', label = 'expected (inner_edge)')
mr = moose_result("gold/small_deform2_inner_edge.csv")
plt.plot(mr[0], mr[1], 'r^', label = 'MOOSE (inner_edge)')
legend = plt.legend(bbox_to_anchor=(1.16, 0.95))
for label in legend.get_texts():
label.set_fontsize('small')
plt.xlabel("Stress")
plt.ylabel("Stress")
plt.title("Drucker-Prager yield function on octahedral plane")
plt.axis([5, 25, -12, 12])
plt.savefig("small_deform2.png")
sys.exit(0)
| lgpl-2.1 |
jaeilepp/eggie | mne/time_frequency/tfr.py | 1 | 34411 | """A module which implements the continuous wavelet transform
with complex Morlet wavelets.
Author : Alexandre Gramfort, [email protected] (2011)
License : BSD 3-clause
inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
from math import sqrt
from copy import deepcopy
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose
from ..channels import ContainsMixin, PickDropChannelsMixin
from ..io.pick import pick_info, pick_types
from ..utils import deprecated
def morlet(Fs, freqs, n_cycles=7, sigma=None, zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
Fs : float
Sampling Frequency
freqs : array
frequency range of interest (1 x Frequencies)
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
sigma : float, (optional)
        It controls the width of the wavelet, i.e. its temporal
        resolution. If sigma is None the temporal resolution
        is adapted with the frequency, as in any wavelet transform:
        the higher the frequency, the shorter the wavelet.
        If sigma is fixed, the temporal resolution is fixed,
        as in the short-time Fourier transform, and the number
        of oscillations increases with the frequency.
zero_mean : bool
Make sure the wavelet is zero mean
Returns
-------
Ws : list of array
Wavelets time series
"""
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# fixed or scale-dependent window
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
t = np.arange(0, 5 * sigma_t, 1.0 / Fs)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
gaussian_enveloppe = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
if zero_mean: # to make it zero mean
real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
oscillation -= real_offset
W = oscillation * gaussian_enveloppe
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
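# Illustrative usage of morlet (hypothetical values, not part of the original module):
#   >>> Ws = morlet(Fs=1000., freqs=np.array([10., 20., 40.]), n_cycles=7)
#   >>> len(Ws)   # one complex wavelet per requested frequency
#   3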
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
"""Compute cwt with fft based convolutions
Return a generator over signals.
"""
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
if len(W) > n_times:
raise ValueError('Wavelet is too long for such a short signal. '
'Reduce the number of cycles.')
fft_Ws[i] = fftn(W, [fsize])
for k, x in enumerate(X):
if mode == "full":
tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
elif mode == "same" or mode == "valid":
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
fft_x = fftn(x, [fsize])
for i, W in enumerate(Ws):
ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = _centered(ret, sz)
else:
tfr[i, :] = _centered(ret, n_times)
yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def cwt_morlet(X, Fs, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
"""Compute time freq decomposition with Morlet wavelets
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
Fs : float
sampling Frequency
freqs : array
Array of frequencies of interest
use_fft : bool
        Compute convolution with FFT or temporal convolution.
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
"""
mode = 'same'
# mode = "valid"
n_signals, n_times = X.shape
n_frequencies = len(freqs)
# Precompute wavelets for given frequency range to save time
Ws = morlet(Fs, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
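# Illustrative usage of cwt_morlet (hypothetical shapes, for orientation only):
#   >>> X = np.random.randn(2, 1000)                  # 2 signals, 1000 samples
#   >>> tfr = cwt_morlet(X, Fs=500., freqs=np.array([10., 20.]))
#   >>> tfr.shape                                     # (n_signals, n_freqs, n_times)
#   (2, 2, 1000)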
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
"""Compute time freq decomposition with continuous wavelet transform
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
Ws : list of array
Wavelets time series
use_fft : bool
Use FFT for convolutions
mode : 'same' | 'valid' | 'full'
Convention for convolution
decim : int
Temporal decimation factor
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
"""
n_signals, n_times = X[:, ::decim].shape
n_frequencies = len(Ws)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr[..., ::decim]
return tfrs
def _time_frequency(X, Ws, use_fft):
"""Aux of time_frequency for parallel computing over channels
"""
n_epochs, n_times = X.shape
n_frequencies = len(Ws)
psd = np.zeros((n_frequencies, n_times)) # PSD
plf = np.zeros((n_frequencies, n_times), dtype=np.complex) # phase lock
mode = 'same'
if use_fft:
tfrs = _cwt_fft(X, Ws, mode)
else:
tfrs = _cwt_convolve(X, Ws, mode)
for tfr in tfrs:
tfr_abs = np.abs(tfr)
psd += tfr_abs ** 2
plf += tfr / tfr_abs
return psd, plf
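# Note (comment added for clarity): in _time_frequency, `psd` accumulates the squared
# magnitude of the wavelet coefficients over epochs, while `plf` accumulates the unit
# phase vectors tfr / |tfr|; both are normalised by the number of epochs later, in
# _induced_power.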
@verbose
def single_trial_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
baseline=None, baseline_mode='ratio', times=None,
decim=1, n_jobs=1, zero_mean=False, verbose=None):
"""Compute time-frequency power on single epochs
Parameters
----------
data : array of shape [n_epochs, n_channels, n_times]
The epochs
Fs : float
Sampling rate
frequencies : array-like
The frequencies
use_fft : bool
Use the FFT for convolutions or not.
n_cycles : float | array of float
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
interval is used.
baseline_mode : None | 'ratio' | 'zscore'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
times : array
Required to define baseline
decim : int
Temporal decimation factor
n_jobs : int
The number of epochs to process at the same time
zero_mean : bool
Make sure the wavelets are zero mean.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : 4D array
Power estimate (Epochs x Channels x Frequencies x Timepoints).
"""
mode = 'same'
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(Fs, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
logger.info("Computing time-frequency power on single epochs...")
power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
dtype=np.float)
# Package arguments for `cwt` here to minimize omissions where only one of
# the two calls below is updated with new function arguments.
cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
if n_jobs == 1:
for k, e in enumerate(data):
power[k] = np.abs(cwt(e, **cwt_kw)) ** 2
else:
# Precompute tf decompositions in parallel
tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
for k, tfr in enumerate(tfrs):
power[k] = np.abs(tfr) ** 2
# Run baseline correction. Be sure to decimate the times array as well if
# needed.
if times is not None:
times = times[::decim]
power = rescale(power, times, baseline, baseline_mode, copy=False)
return power
def _induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
decim=1, n_jobs=1, zero_mean=False):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with Morlet wavelets
Parameters
----------
data : array
3D array of shape [n_epochs, n_channels, n_times]
Fs : float
sampling Frequency
frequencies : array
Array of frequencies of interest
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
decim: int
Temporal decimation factor
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
    power : 3D array
Induced power (Channels x Frequencies x Timepoints).
Squared amplitude of time-frequency coefficients.
    phase_lock : 3D array
Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
"""
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(Fs, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
if n_jobs == 1:
psd = np.empty((n_channels, n_frequencies, n_times))
plf = np.empty((n_channels, n_frequencies, n_times), dtype=np.complex)
for c in range(n_channels):
X = data[:, c, :]
this_psd, this_plf = _time_frequency(X, Ws, use_fft)
psd[c], plf[c] = this_psd[:, ::decim], this_plf[:, ::decim]
else:
parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
psd_plf = parallel(my_time_frequency(np.squeeze(data[:, c, :]),
Ws, use_fft)
for c in range(n_channels))
psd = np.zeros((n_channels, n_frequencies, n_times))
plf = np.zeros((n_channels, n_frequencies, n_times), dtype=np.complex)
for c, (psd_c, plf_c) in enumerate(psd_plf):
psd[c, :, :], plf[c, :, :] = psd_c[:, ::decim], plf_c[:, ::decim]
psd /= n_epochs
plf = np.abs(plf) / n_epochs
return psd, plf
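# Note: the phase-locking factor returned above is |sum over epochs of the unit phase
# vectors| / n_epochs, so it lies in [0, 1]; a value of 1 means perfectly consistent
# phase across epochs at that channel/frequency/time point.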
@deprecated("induced_power will be removed in release 0.9. Use "
"tfr_morlet instead.")
def induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
decim=1, n_jobs=1, zero_mean=False):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with Morlet wavelets
Parameters
----------
data : array
3D array of shape [n_epochs, n_channels, n_times]
Fs : float
sampling Frequency
frequencies : array
Array of frequencies of interest
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
decim: int
Temporal decimation factor
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
    power : 3D array
Induced power (Channels x Frequencies x Timepoints).
Squared amplitude of time-frequency coefficients.
    phase_lock : 3D array
Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
"""
return _induced_power(data, Fs, frequencies, use_fft=use_fft,
n_cycles=n_cycles, decim=decim, n_jobs=n_jobs,
zero_mean=zero_mean)
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB):
"""Aux Function to prepare tfr computation"""
from ..viz.utils import _setup_vmin_vmax
if mode is not None and baseline is not None:
logger.info("Applying baseline correction '%s' during %s" %
(mode, baseline))
data = rescale(data.copy(), times, baseline, mode)
# crop time
itmin, itmax = None, None
if tmin is not None:
itmin = np.where(times >= tmin)[0][0]
if tmax is not None:
itmax = np.where(times <= tmax)[0][-1]
times = times[itmin:itmax]
# crop freqs
ifmin, ifmax = None, None
if fmin is not None:
ifmin = np.where(freqs >= fmin)[0][0]
if fmax is not None:
ifmax = np.where(freqs <= fmax)[0][-1]
freqs = freqs[ifmin:ifmax]
# crop data
data = data[:, ifmin:ifmax, itmin:itmax]
times *= 1e3
if dB:
data = 20 * np.log10(data)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
return data, times, freqs, vmin, vmax
# XXX : todo IO of TFRs
class AverageTFR(ContainsMixin, PickDropChannelsMixin):
"""Container for Time-Frequency data
Can for example store induced power at sensor level or intertrial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, verbose=None):
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = times
self.freqs = freqs
self.nave = nave
@property
def ch_names(self):
return self.info['ch_names']
@verbose
def plot(self, picks, baseline=None, mode='mean', tmin=None, tmax=None,
fmin=None, fmax=None, vmin=None, vmax=None, cmap='RdBu_r',
dB=False, colorbar=True, show=True, verbose=None):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas
show : bool
Call pyplot.show() at the end.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
data = self.data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB)
tmin, tmax = times[0], times[-1]
for k, p in zip(range(len(data)), picks):
plt.figure()
_imshow_tfr(plt, 0, tmin, tmax, vmin, vmax, ylim=None,
tfr=data[k: k + 1], freq=freqs, x_label='Time (ms)',
y_label='Frequency (Hz)', colorbar=colorbar,
picker=False, cmap=cmap)
if show:
import matplotlib.pyplot as plt
plt.show()
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
from ..viz.topo import _imshow_tfr, _plot_topo
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
if picks is not None:
data = data[picks]
info = pick_info(info, picks)
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB)
if layout is None:
from mne.layouts.layout import find_layout
layout = find_layout(self.info)
imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap)
fig = _plot_topo(info=info, times=times,
show_func=imshow, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border='w',
x_label='Time (ms)', y_label='Frequency (Hz)')
if show:
import matplotlib.pyplot as plt
plt.show()
return fig
def _check_compat(self, tfr):
"""checks that self and tfr have the same time-frequency ranges"""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr):
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr):
self._check_compat(tfr)
self.data -= tfr.data
return self
def copy(self):
"""Return a copy of the instance."""
return deepcopy(self)
def __repr__(self):
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
        s += ', channels : %d' % self.data.shape[0]
return "<AverageTFR | %s>" % s
def apply_baseline(self, baseline, mode='mean'):
"""Baseline correct the data
Parameters
----------
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
"""
self.data = rescale(self.data, self.times, baseline, mode, copy=False)
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type='mag', baseline=None, mode='mean',
layout=None, vmin=None, vmax=None, cmap='RdBu_r',
sensors='k,', colorbar=True, unit=None, res=64, size=2,
format='%1.1e', show_names=False, title=None,
axes=None, show=True):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
        tfr : AverageTFR
            The AverageTFR object.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
            The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses).
colorbar : bool
Plot a colorbar.
unit : str | None
The unit of the channel type used for colorbar labels.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
format : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size, format=format,
show_names=show_names, title=title, axes=axes,
show=show)
def tfr_morlet(epochs, freqs, n_cycles, use_fft=False,
return_itc=True, decim=1, n_jobs=1):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets
Parameters
----------
epochs : Epochs
The epochs.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
use_fft : bool
The fft based convolution or not.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
decim : int
The decimation factor on the time axis. To reduce memory usage.
n_jobs : int
        The number of jobs to run in parallel.
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
"""
data = epochs.get_data()
picks = pick_types(epochs.info, meg=True, eeg=True)
info = pick_info(epochs.info, picks)
data = data[:, picks, :]
power, itc = _induced_power(data, Fs=info['sfreq'], frequencies=freqs,
n_cycles=n_cycles, n_jobs=n_jobs,
use_fft=use_fft, decim=decim,
zero_mean=True)
times = epochs.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave)
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave))
return out
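# Illustrative usage of tfr_morlet (hypothetical objects, not part of this module):
#   power, itc = tfr_morlet(epochs, freqs=np.arange(6, 30, 3), n_cycles=2,
#                           use_fft=True, return_itc=True, decim=3)
#   power.plot([0], baseline=(None, 0), mode='logratio')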
| bsd-2-clause |
chintak/scikit-image | skimage/viewer/plugins/color_histogram.py | 3 | 3248 | import numpy as np
import matplotlib.pyplot as plt
from skimage import color
from skimage import exposure
from .plotplugin import PlotPlugin
from ..canvastools import RectangleTool
class ColorHistogram(PlotPlugin):
name = 'Color Histogram'
def __init__(self, max_pct=0.99, **kwargs):
super(ColorHistogram, self).__init__(height=400, **kwargs)
self.max_pct = max_pct
print(self.help())
def attach(self, image_viewer):
super(ColorHistogram, self).attach(image_viewer)
self.rect_tool = RectangleTool(self.ax, on_release=self.ab_selected)
self._on_new_image(image_viewer.image)
def _on_new_image(self, image):
self.lab_image = color.rgb2lab(image)
# Calculate color histogram in the Lab colorspace:
L, a, b = self.lab_image.T
left, right = -100, 100
ab_extents = [left, right, right, left]
self.mask = np.ones(L.shape, bool)
bins = np.arange(left, right)
hist, x_edges, y_edges = np.histogram2d(a.flatten(), b.flatten(),
bins, normed=True)
self.data = {'bins': bins, 'hist': hist, 'edges': (x_edges, y_edges),
'extents': (left, right, left, right)}
# Clip bin heights that dominate a-b histogram
max_val = pct_total_area(hist, percentile=self.max_pct)
hist = exposure.rescale_intensity(hist, in_range=(0, max_val))
self.ax.imshow(hist, extent=ab_extents, cmap=plt.cm.gray)
self.ax.set_title('Color Histogram')
self.ax.set_xlabel('b')
self.ax.set_ylabel('a')
def help(self):
helpstr = ("Color Histogram tool:",
"Select region of a-b colorspace to highlight on image.")
return '\n'.join(helpstr)
def ab_selected(self, extents):
x0, x1, y0, y1 = extents
self.data['extents'] = extents
lab_masked = self.lab_image.copy()
L, a, b = lab_masked.T
self.mask = ((a > y0) & (a < y1)) & ((b > x0) & (b < x1))
lab_masked[..., 1:][~self.mask.T] = 0
self.image_viewer.image = color.lab2rgb(lab_masked)
def output(self):
"""Return the image mask and the histogram data.
Returns
-------
mask : array of bool, same shape as image
The selected pixels.
data : dict
The data describing the histogram and the selected region.
Keys:
- 'bins' : array of float, the bin boundaries for both
`a` and `b` channels.
- 'hist' : 2D array of float, the normalized histogram.
- 'edges' : tuple of array of float, the bin edges
along each dimension
- 'extents' : tuple of float, the left and right and
top and bottom of the selected region.
"""
return (self.mask, self.data)
def pct_total_area(image, percentile=0.80):
"""Return threshold value based on percentage of total area.
The specified percent of pixels less than the given intensity threshold.
"""
idx = int((image.size - 1) * percentile)
sorted_pixels = np.sort(image.flat)
return sorted_pixels[idx]
| bsd-3-clause |
js7558/pyBinance | tests/test-getAllOrders.py | 1 | 2412 | #!/usr/bin/python
import pandas as pd
import sys
sys.path.append('../')
from Binance import Binance
import logging.config
import logging.handlers
import logging
import os
# this logging configuration is sketchy
binance = logging.getLogger(__name__)
logging.config.fileConfig('logging.ini')
# create Binance object
bn = Binance()
# set keys
bn.setSecretKey('NhqPtmdSJYdKjVHjA7PZj4Mge3R5YNiP1e3UZjInClVN65XAbvqqM6A7H5fATj0j')
bn.setAPIKey('vmPUZE6mv9SD5VNHk4HlWFsOr6aKE2zvsw0MuIgwCIPy6utIco14y7Ju91duEh8A')
# getAllOrders
print "---------------- getAllOrders --------------"
print "################################# POSITIVE TESTS (returns 1 or r) ###################"
queryParams = {'symbol':'SALTBTC'}
print "****test valid mandatory input symbol, timestamp autogenerated"
test = bn.getAllOrders(queryParams)
print
queryParams = {'symbol':'SALTBTC','timestamp':1507770491000}
print "****test valid mandatory input symbol, timestamp supplied"
test = bn.getAllOrders(queryParams)
print
queryParams = {'symbol':'SALTBTC','recvWindow':123435234,'limit':8}
print "****test valid mandatory input symbol, timestamp autogenerated, optional params"
test = bn.getAllOrders(queryParams)
print
queryParams = {'symbol':'SALTBTC','timestamp':1507770491000,'recvWindow':123435234,'orderId':12345678}
print "****test valid mandatory input symbol, timestamp supplied, optional params"
test = bn.getAllOrders(queryParams)
print
print "################################# NEGATIVE TESTS (returns 0) ###################"
print
queryParams = {'recvWindow':112234}
print "****test valid optional inputs, valid parameter missing"
test = bn.getAllOrders(queryParams)
print
queryParams = {'symbol':12.5,'orderId':3334344}
print "****test valid mandatory inputs present with invalid type"
test = bn.getAllOrders(queryParams)
print
queryParams = {'symbol':'SALTBTC','recvWindow':'123456778','timestamp':150774295}
print "****test valid mandatory inputs, invalid user proved timestamp, plus some optional"
test = bn.getAllOrders(queryParams)
print
queryParams = {'symbol':'ETHBTC','timestamp':'abcdefghijklm'}
print "****test valid mandatory inputs, invalid user proved timestamp type but length ok, plus some optional"
test = bn.getAllOrders(queryParams)
print
queryParams = {'symbol':'ETHBTC','sharkbite':'abcdefghijklm'}
print "****test valid mandatory inputs, random input invalid value"
test = bn.getAllOrders(queryParams)
print
| mit |
ChinmaiRaman/phys227-final | final.py | 1 | 6752 | #! /usr/bin/env python
"""
File: final.py
Copyright (c) 2016 Chinmai Raman
License: MIT
Course: PHYS227
Assignment: Final
Date: May 21, 2016
Email: [email protected]
Name: Chinmai Raman
Description: Final
"""
from __future__ import division
from unittest import TestCase
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class Rossler():
def __init__(self, c, dt = 0.001, T0 = 250, T = 500):
self.dt = float(dt)
self.T = float(T)
self.T0 = T0
self.c = float(c)
self.t = np.linspace(0.0, self.T, self.T / self.dt)
self.x = np.zeros(len(self.t))
self.y = np.zeros(len(self.t))
self.z = np.zeros(len(self.t))
self.x0 = 0
self.y0 = 0
self.z0 = 0
def f1(self, x, y, z, t):
return -1 * y - 1 * z
def f2(self, x, y, z, t):
return x + 0.2 * y
def f3(self, x, y, z, t):
return 0.2 + z * (x - self.c)
def run(self):
"""
Implements the fourth order Runge-Kutta method of differentiation.
"""
dt = self.dt
x = self.x
y = self.y
z = self.z
t = self.t
f1 = self.f1
f2 = self.f2
f3 = self.f3
for i in np.arange(0, len(t) - 1):
k1_x = dt * f1(x[i], y[i], z[i], t[i])
k1_y = dt * f2(x[i], y[i], z[i], t[i])
k1_z = dt * f3(x[i], y[i], z[i], t[i])
k2_x = dt * f1(x[i] + 0.5 * k1_x, y[i] + 0.5 * k1_y, z[i] + 0.5 * k1_z, t[i] + 0.5 * dt)
k2_y = dt * f2(x[i] + 0.5 * k1_x, y[i] + 0.5 * k1_y, z[i] + 0.5 * k1_z, t[i] + 0.5 * dt)
k2_z = dt * f3(x[i] + 0.5 * k1_x, y[i] + 0.5 * k1_y, z[i] + 0.5 * k1_z, t[i] + 0.5 * dt)
k3_x = dt * f1(x[i] + 0.5 * k2_x, y[i] + 0.5 * k2_y, z[i] + 0.5 * k2_z, t[i] + 0.5 * dt)
k3_y = dt * f2(x[i] + 0.5 * k2_x, y[i] + 0.5 * k2_y, z[i] + 0.5 * k2_z, t[i] + 0.5 * dt)
k3_z = dt * f3(x[i] + 0.5 * k2_x, y[i] + 0.5 * k2_y, z[i] + 0.5 * k2_z, t[i] + 0.5 * dt)
            k4_x = dt * f1(x[i] + k3_x, y[i] + k3_y, z[i] + k3_z, t[i+1])
            k4_y = dt * f2(x[i] + k3_x, y[i] + k3_y, z[i] + k3_z, t[i+1])
            k4_z = dt * f3(x[i] + k3_x, y[i] + k3_y, z[i] + k3_z, t[i+1])
x[i+1] = x[i] + (k1_x + 2 * k2_x + 2 * k3_x + k4_x) / 6
y[i+1] = y[i] + (k1_y + 2 * k2_y + 2 * k3_y + k4_y) / 6
z[i+1] = z[i] + (k1_z + 2 * k2_z + 2 * k3_z + k4_z) / 6
def plotx(self):
t = self.t
T = self.T
x = self.x
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(t, x, 'b-')
plt.xlabel('t')
plt.ylabel('x(t)')
plt.title('x(t) vs t')
plt.show(fig)
plt.close(fig)
def ploty(self):
t = self.t
T = self.T
y = self.y
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(t, y, 'b-')
plt.xlabel('t')
plt.ylabel('y(t)')
plt.title('y(t) vs t')
plt.show(fig)
plt.close(fig)
def plotz(self):
t = self.t
T = self.T
z = self.z
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(t, z, 'b-')
plt.xlabel('t')
plt.ylabel('z(t)')
plt.title('z(t) vs t')
plt.show(fig)
plt.close(fig)
def plotxy(self):
t = self.t
T0 = self.T0
x = self.x[np.where(t >= T0)]
y = self.y[np.where(t >= T0)]
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(x, y, 'b-')
plt.xlabel('x(t)')
plt.ylabel('y(t)')
plt.title('y(t) vs x(t)')
ax.axis([-12, 12, -12, 12])
plt.show(fig)
plt.close(fig)
def plotyz(self):
t = self.t
T0 = self.T0
y = self.y[np.where(t >= T0)]
z = self.z[np.where(t >= T0)]
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(y, z, 'b-')
plt.xlabel('y(t)')
plt.ylabel('z(t)')
plt.title('z(t) vs y(t)')
ax.axis([-12, 12, 0, 25])
plt.show(fig)
plt.close(fig)
def plotxz(self):
t = self.t
T0 = self.T0
x = self.x[np.where(t >= T0)]
z = self.z[np.where(t >= T0)]
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(x, z, 'b-')
plt.xlabel('x(t)')
plt.ylabel('z(t)')
plt.title('z(t) vs x(t)')
ax.axis([-12, 12, 0, 25])
plt.show(fig)
plt.close(fig)
def plotxyz(self):
t = self.t
T0 = self.T0
x = self.x[np.where(t >= T0)]
y = self.y[np.where(t >= T0)]
z = self.z[np.where(t >= T0)]
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
ax.grid(True)
plt.plot(x, y, z, 'b-')
plt.xlabel('x(t)')
plt.ylabel('y(t)')
ax.set_zlabel("z(t)")
plt.title('z(t) vs y(t) vs x(t)')
ax.axis([-12, 12, -12, 12])
ax.set_zlim((0, 25))
plt.show(fig)
plt.close(fig)
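# Illustrative usage (not part of the original assignment):
#   ros = Rossler(c=5.7)   # chaotic regime of the Rossler system
#   ros.run()              # integrate the ODEs with RK4
#   ros.plotxyz()          # 3-D trajectory for t >= T0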
def findmaxima(c, dim):
"""
finds the local maxima of x given a particular c
"""
ros = Rossler(c)
ros.run()
if dim == 'x':
var = ros.x
elif dim == 'y':
var = ros.y
elif dim == 'z':
var = ros.z
values = var[np.where(ros.t >= ros.T0)]
local_max = values[np.where((np.r_[True, values[1:] > values[:-1]] & np.r_[values[:-1] > values[1:], True]) == True)]
return local_max[local_max > 0]
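# Note: the np.r_ expression above keeps values[i] only when it is strictly greater
# than both neighbours (the boundary comparisons are padded with True), i.e. it
# selects strict local maxima of the post-transient trace, and the final filter
# keeps only the positive ones.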
def plotmaxima(dim):
"""
plots local maxima of x,y, or z vs c
"""
c_values = np.linspace(2, 6, 41)
var = [findmaxima(c, dim)[-17:] for c in c_values]
fig = plt.figure(1)
plt.plot(c_values, [elem for elem in var], 'b-')
plt.xlabel('c')
plt.ylabel(dim)
plt.ylim([3,12])
plt.title(dim + ' local maxes vs. c')
plt.show()
class Test_Ros(TestCase):
def test_ros(self):
T = 500
dt = 0.001
x_test = dt * np.arange(0, T / dt)
y_test = dt * np.arange(0, T / dt)
z_test = dt * np.arange(0, T / dt)
def f1(x, y, z, t):
return 1
def f2(x, y, z, t):
return 1
def f3(x, y, z, t):
return 1
test = Rossler(2)
test.f1 = f1
test.f2 = f2
test.f3 = f3
test.run()
print test.x[-10:]
print x_test[-10:]
assert (abs(test.x - x_test) < 1e-3).all() and (abs(test.y - y_test) < 1e-3).all() and (abs(test.z - z_test) < 1e-3).all(), 'Failure' | mit |
JanetMatsen/meta4_bins_janalysis | compare_fauzi_bins/split_fasta_into_individual_bins.py | 1 | 3114 | import re
import sys
print(sys.path)
import pandas as pd
from Bio import SeqIO
import os
def lookup_filename(record):
bin = re.search('(Ga[0-9]+)_', record.id).group(1)
filename_array = bin_df[bin_df['bin'] == bin]['bin name'].values
# There is a problem if more than one file name matched.
assert len(filename_array) == 1, \
'need only 1 selected; had {}'.format(filename_array)
# return the file name for the match.
filename = filename_array[0]
# replace spaces with _
filename = filename.replace(" ", "_")
return filename
def make_filename(bin_name):
    return os.path.join(dir_out, bin_name + '.fasta')
def erase_existing_file(file_name):
if os.path.isfile(file_name):
os.remove(file_name)
else:
print("file {} doesn't exist".format(file_name))
return
def recreate_bins():
# keep track of bins we have erased and started fresh
initialized_bins = []
for record in SeqIO.parse(open(file_in), "fasta"):
f_name = lookup_filename(record)
f_out = os.path.join(dir_out, f_name + '.fasta')
# if it isn't in initialized_bins, it doesn't exist or needs to be
# wiped.
if f_out not in initialized_bins:
# erase file so we don't cat onto an old one.
erase_existing_file(f_out)
initialized_bins.append(f_out)
else:
print('filename: {}'.format(f_out))
SeqIO.write([record], open(f_out, 'a'), "fasta")
if __name__ == '__main__':
support_dir = './support_files/'
if not os.path.exists(support_dir):
os.makedirs(support_dir)
# first extract the bin names
# replaces extract_names.sh, which searched for all contig names in
# /data/genome_bins.fasta and saved reults to /compare_bins/DNA_names.txt
# use os, not envoy this time. I haven't shown envoy to be good w/ Python3
os.system(
'ag --max-count 9999999 ">" '
'/gscratch/lidstrom/meta4_bins/data/genome_bins.fasta > '
'/gscratch/lidstrom/meta4_bins/janalysis/' # path continued
'compare_bins/support_files/DNA_names.txt')
# call summarise_bins to make bin_summary.csv
# summarise_bins.main()
# exec(open("./filename").read())
exec(open(support_dir + "summarise_bins.py").read())
# exec(open("./path/to/script.py").read(), globals())
# This will execute a script and put all it's global variables in the
# interpreter's global scope (the normal behavior in most other languages).
# make dir individual_bins
if not os.path.exists('./individual_bins'):
os.makedirs('./individual_bins')
bin_df = pd.read_csv(support_dir + '/bin_summary.csv')
usage = "usage: %prog fasta_file_in directory_out"
# parser = OptionParser(usage)
# (opts, args) = parser.parse_args()
dir_out = os.getcwd() + '/individual_bins'
file_in = '/gscratch/lidstrom/meta4_bins/data/genome_bins.fasta'
dir_out = './individual_bins'
recreate_bins()
# call bin_lengths.py
# Also reports time
exec(open(support_dir + "bin_lengths.py").read())
| bsd-2-clause |
dandanvidi/in-vivo-enzyme-kinetics | scripts/class.py | 3 | 11632 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 17:28:17 2016
@author: dan
"""
import cPickle as pickle
import pandas as pd
from trees import Tree
import csv, re
from matplotlib_venn import venn2
import matplotlib.pyplot as plt
from copy import deepcopy
import numpy as np
import seaborn as sb
from collections import defaultdict
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
ppath = "../../proteomics-collection/"
proteomics = pd.DataFrame.from_csv(ppath+"meta_abundance[copies_fL].csv")
pFBA = pd.DataFrame.from_csv("../data/flux[mmol_gCDW_h].csv")
pFVA = pd.DataFrame.from_csv("../data/flux_variability_[mmol_gCDW_h].csv", header=[0,1]).T
protein_info = pd.read_csv('../data/protein_abundance_info.csv', sep='\t')
gc = pd.DataFrame.from_csv("../data/growth_conditions.csv")
#gc = gc[gc.reference=='Schmidt et al. 2015']
gr = gc['growth rate [h-1]'][gc.index]
fL_cell = gc['single cell volume [fL]'] /2 # fL (cell volumes are overestimated by a factor of 1.7)
fg_cell_old = pd.read_csv('../data/protein_abundance_[fg_cell].csv')
copies_cell_persist = pd.read_csv('../data/protein_abundance_persistors[copies_cell].csv')
model = create_cobra_model_from_sbml_file('../data/iJO1366.xml')
convert_to_irreversible(model)
rxns = {r.id:r for r in model.reactions}
def map_proteomics(df):
uni_to_b = {row[48:54]:row[0:5].split(';')[0].strip()
for row in open("../data/all_ecoli_genes.txt", 'r')}
df.replace(to_replace={'upid':uni_to_b}, inplace=True)
manual_replacememnts = {
'D0EX67':'b1107',
'D4HZR9':'b2755',
'P00452-2':'b2234',
'P02919-2':'b0149',
'Q2A0K9':'b2011',
'Q5H772':'b1302',
'Q5H776':'b1298',
'Q5H777':'b1297',
'Q6E0U3':'b3183'}
df.replace(to_replace={'upid':manual_replacememnts}, inplace=True)
df.set_index('upid', inplace=True)
df.index.name = 'bnumber'
not_identified = ['B8LFD5','D8FH86','D9IX93','E1MTY0','P0CE60','P23477']
df.drop(not_identified, axis=0, inplace=True)
df.sort_index(inplace=True)
def genes_by_function(name):
tree = Tree.FromTMS(open('../data/KO_gene_hierarchy_general.tms', 'r'), 4)
f_KEGG = tree.GetNode(name).children
reader = csv.reader(open('../data/eco_mapping.csv', 'r'), delimiter='\t')
b_to_KEGG = {row[0]:row[2] for row in reader}
return {b for b,ko in b_to_KEGG.iteritems() if ko in f_KEGG}
def convert_copies_fL_to_mmol_gCDW(copies_fL):
rho = 1100 # average cell density gr/liter
DW_fraction = 0.3 # fraction of DW of cells
Avogadro = 6.02214129 # Avogadro's number "exponent-less"
mmol_L = copies_fL / (Avogadro*1e5)
mmol_gCDW = mmol_L / (rho * DW_fraction)
return mmol_gCDW
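# Unit check for the conversion above (comment added for clarity):
# copies/fL * 1e15 fL/L / (6.022e23 copies/mol) = copies/fL / 6.022e8 mol/L
# = copies/fL / (6.022 * 1e5) mmol/L, hence the division by Avogadro*1e5 with
# Avogadro written "exponent-less"; dividing by rho * DW_fraction (g dry weight
# per litre of cell volume) then gives mmol per g cell dry weight.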
def convert_mmol_gCDW_to_mg_gCDW(mmol_gCDW):
protein_info = pd.DataFrame.from_csv('../data/ecoli_genome_info.tsv', sep='\t')
protein_g_mol = protein_info['molecular_weight[Da]']
mg_gCDW = mmol_gCDW.mul(protein_g_mol,axis=0)
mg_gCDW.replace(np.nan, 0, inplace=True)
return mg_gCDW
def get_complex_molecular_weight(model):
complexes = pd.DataFrame.from_csv('../data/enzyme_complexes.csv')
comp = list(complexes['Gene composition'].values)
comp = [dict(zip(re.findall(r"b[0-9]+", s),re.findall(r"\(([0-9]+)\)", s))) for s in comp]
protein_info = pd.DataFrame.from_csv('../data/ecoli_genome_info.tsv', sep='\t')
protein_g_mol = protein_info['molecular_weight[Da]']
all_genes = defaultdict(list)
for s in comp:
for k,v in s.iteritems():
all_genes[k].append(float(v))
for bnumber in protein_g_mol.index:
if bnumber not in all_genes.keys():
all_genes[bnumber].append(1.0)
subunit_comp = {k:np.mean(v) for k,v in all_genes.iteritems()}
r_to_weights = {}
for r in model.reactions:
isozymes = r.gene_reaction_rule.split('or')
isozymes = [re.findall(r"b[0-9]+", iso) for iso in isozymes]
weights = [sum([subunit_comp[b]*protein_g_mol[b] if b in protein_g_mol.index else np.nan
for b in iso]) for iso in isozymes]
r_to_weights[r.id] = np.mean(weights)
return r_to_weights
def convert_copies_fL_to_mg_gCDW(E):
tmp = convert_copies_fL_to_mmol_gCDW(E)
return convert_mmol_gCDW_to_mg_gCDW(tmp)
def get_umol_gCDW_min_from_pFVA(pFVA):
conds = pFVA.index.levels[0]
x = pFVA.loc[[(c, 'maximum') for c in conds]]
x.set_index(conds, inplace=True)
x = x[x>1e-10]
return (x * 1000) / 60
def gene_to_flux_carrying_rxns(V,model,use_cache=False):
if use_cache:
with open('../cache/genes_to_flux_carrying_reactions.p', 'rb') as fp:
return pickle.load(fp)
out = {}
for c in V.columns:
out[c] = {}
vc = V[c]
vc = vc[vc>0]
for g in model.genes:
rxns = {r.id for r in list(g.reactions)} & set(vc.index)
if len(rxns)>0:
out[c][g.id] = rxns
with open('../cache/genes_to_flux_carrying_reactions.p', 'wb') as fp:
pickle.dump(out, fp)
return out
def convert_SA_to_kcat(SA, MW):
# MW in units of kDa
return SA.mul(MW) / 60
def flux_carrying_reactions_to_enzymes(V,E,model,use_cache=False):
if use_cache:
with open('../cache/flux_carrying_reactions_to_enzymes.p', 'rb') as fp:
return pickle.load(fp)
try:
V = V.drop('flux_counter')
except ValueError:
print "flux couter already removed"
mapper = {}
for c in V.columns:
mapper[c] = {}
#use only flux carrying reactions in a given condition
vc = V[c]
vc = vc[vc>0]
for rid in vc.index:
r = model.reactions.get_by_id(rid)
genes = {g.id:g for g in r.genes}
# annoing gene in the model - just ignore the reaction it carries
if 's0001' in genes: continue
mapper[c][r.id] = {}
for i, (gid, g) in enumerate(genes.iteritems()):
rxns = {r.id for r in list(g.reactions)} & set(vc.index)
mapper[c][rid][gid] = float(len(rxns))
with open('../cache/flux_carrying_reactions_to_enzymes.p', 'wb') as fp:
pickle.dump(mapper, fp)
return mapper
def specific_activity(V,E,model):
mapper = flux_carrying_reactions_to_enzymes(V,E,model)
V = V.to_dict()
E = E.to_dict()
SA = {}
for c,reactions in V.iteritems():
SA[c] = {}
for r,v in reactions.iteritems():
if r in mapper[c]:
genes = mapper[c][r]
abundance = E[c]
weight = sum([abundance[e] / genes[e] for e in genes])
if np.isfinite(weight) and weight > 0:
SA[c][r] = V[c][r] / weight
else:
SA[c][r] = np.nan
SA = pd.DataFrame.from_dict(SA)
return SA
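# Interpretation (comment added for clarity): specific_activity divides each
# reaction's flux by the enzyme mass assigned to it, where a gene's abundance is
# split evenly across the flux-carrying reactions it participates in; the result
# is an apparent specific activity (flux per enzyme mass) per reaction and condition.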
def enzyme_capacity_usage(SA):
kmax = SA.max(axis=1)
return SA.div(kmax,axis=0)
def metabolic_capacity(V,E,model):
tmp = gene_to_flux_carrying_rxns(V,model)
capacity = pd.Series({c:E.loc[tmp[c].keys()][c].sum() for c in V.columns})
return capacity
def metabolic_capacity_usage(V,E,model):
capacity = metabolic_capacity(V,E,model)
SA = specific_activity(V,E,model)
ECU = enzyme_capacity_usage(SA)
E = (V/SA).loc[SA.index]
return (ECU.mul(E)).sum() / capacity
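# Interpretation: metabolic_capacity_usage is the enzyme-mass-weighted average of the
# per-reaction capacity usage SA/kmax, normalised by the total mass of flux-carrying
# enzymes (the "metabolic capacity") in each condition.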
def bootstrap_capacity_usage_error(V,E,model,iterations=10):
UC = pd.DataFrame(index=range(iterations),columns=V.columns)
for i in xrange(iterations):
newE = pd.DataFrame(index=E.index, columns=E.columns)
for c in V.columns:
x = E[c]
x = x[x>0]
rand = np.random.choice(x.values, len(x), replace=True)
newE[c][x.index] = rand
newE.replace(np.nan, 0, inplace=True)
        UC.loc[i] = metabolic_capacity_usage(V,newE,model)
return UC.std()
#def get_foldchange(V,E,gc):
#
# gr = gc['growth rate [h-1]']
#
# combs_all = [(i,j) for (i,j) in combinations(gc.index, 2) if gr[j] > gr[i]]
# delta_mu = pd.Series(data = map(lambda x: np.log2(gr[x[1]]/gr[x[0]]), combs_all),
# index = combs_all)
# delta_p = pd.DataFrame(index=reactions, columns=combs)
# delta_v = pd.DataFrame(index=reactions, columns=combs)
# for (i, j) in combs:
# delta_p[(i,j)] = np.log2(p[j] / p[i])
# delta_v[(i,j)] = np.log2(v[j] / v[i])
# return delta_p, delta_v, delta_mu
def get_surface_to_volume_ratio(length,width):
# cylinder + sphere
volume = np.pi*(length-width)*(width/2)**2 + 4/3*np.pi*(width/2)**3# um^3
surface = 2*np.pi*(length-width)*(width/2) + 4*np.pi*(width/2)**2# um^2
return surface, volume, surface/volume
def optimize_growth(model, cs):
rxns = {r.id:r for r in model.reactions}
rxns['EX_glc_e'].lower_bound = 0 # uptake of carbon source reaction is initialized
try:
rxns['EX_' + cs + '_e'].lower_bound = -1000 # redefine sole carbon source uptake reaction in mmol/gr/h
except KeyError:
print "%s is not in the model, using glucose instead" %cs
rxns['EX_glc_e'].lower_bound = -1000
rxns['Ec_biomass_iJO1366_core_53p95M'].objective_coefficient = 0
rxns['Ec_biomass_iJO1366_WT_53p95M'].objective_coefficient = 1
model.optimize()
return
def get_maximal_growth_rate(model, Vmax, condition):
Vmax = Vmax[condition].copy()
Vmax = Vmax.dropna()
Vmax = Vmax * 60 / 1000 # convert to units of mmol/gCDW/h
rxns = {r.id:r for r in model.reactions}
initial_bound = {}
for r in Vmax.index:
initial_bound[rxns[r]] = rxns[r].upper_bound
rxns[r].upper_bound = Vmax[r]
optimize_growth(model, gc['media_key'][condition])
for r,ub in initial_bound.iteritems():
r.upper_bound = ub
return model.solution.f
def get_rand_ECU(ECU,model):
reactions = [str(r) for r in model.reactions]
conds = ECU.columns
rand_ECU = pd.DataFrame(columns=conds, index=reactions)
for c in conds:
tmp = ECU[c].dropna()
rand_ECU[c] = np.random.gamma(tmp.mean(),tmp.std(),len(reactions))
return rand_ECU
'''
x = x.dropna()
w = w.dropna()
ix = x.index & w.index
x = x[ix].values
w = w[ix].values
Mw = np.zeros(1000)
for i in xrange(1000):
rand = np.random.choice(range(len(x)), len(x), replace=True)
newx = x[rand]
neww = w[rand]
Mw[i] = sum(newx*neww)/sum(neww)
return np.std(Mw)
'''
# print len(fva.keys())
# return fva
#map_proteomics(copies_cell_persist)
#map_proteomics(protein_info)
#map_proteomics(fg_cell_old)
#
#x = copies_cell_persist[new_conditions]
#y = copies_cell_persist['Protein molecular weight']
#fg_cell_persist = x.mul(y,axis=0) / (6.022*1e8)
#
#fg_cell = fg_cell_old.join(fg_cell_persist, how='outer')
#fg_fL = fg_cell.div(fL_cell)
#
#mg_gCDW = fg_fL[gr.index]/(1100/3)*1000 # cell density is 1100 g/L; DW fraction is 1/3
##mg_gCDW.to_csv('../data/mg_gCDW.csv')
##
#out = protein_info.join(mg_gCDW)
#out.to_csv('../data/protein_abundance[mg_gCDW].csv', sep='\t')
#plt.figure()
#ax = plt.axes()
#old = fg_cell_old.index
#new = copies_cell_persist.index
#venn2([old, new], set_labels=('Schmidt et al.', 'Persisters'),set_colors=('#4a6b8a','#801515'),ax=ax)
#plt.tight_layout()
#plt.savefig('../res/comparing coverage.svg')
| mit |
frank-tancf/scikit-learn | sklearn/manifold/setup.py | 99 | 1243 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
cbertinato/pandas | pandas/core/sparse/scipy_sparse.py | 1 | 6074 | """
Interaction with scipy.sparse matrices.
Currently only includes SparseSeries.to_coo helpers.
"""
from collections import OrderedDict
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series
def _check_is_partition(parts, whole):
whole = set(whole)
parts = [set(x) for x in parts]
if set.intersection(*parts) != set():
raise ValueError(
'Is not a partition because intersection is not null.')
if set.union(*parts) != whole:
raise ValueError('Is not a partition because union is not the whole.')
def _to_ijv(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
""" For arbitrary (MultiIndexed) SparseSeries return
(v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for
passing to scipy.sparse.coo constructor. """
# index and column levels must be a partition of the index
_check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
# from the SparseSeries: get the labels and data for non-null entries
values = ss._data.internal_values()._valid_sp_values
nonnull_labels = ss.dropna()
def get_indexers(levels):
""" Return sparse coords and dense labels for subset levels """
# TODO: how to do this better? cleanly slice nonnull_labels given the
# coord
values_ilabels = [tuple(x[i] for i in levels)
for x in nonnull_labels.index]
if len(levels) == 1:
values_ilabels = [x[0] for x in values_ilabels]
# # performance issues with groupby ###################################
# TODO: these two lines can replace the code below but
# groupby is too slow (in some cases at least)
# labels_to_i = ss.groupby(level=levels, sort=sort_labels).first()
# labels_to_i[:] = np.arange(labels_to_i.shape[0])
def _get_label_to_i_dict(labels, sort_labels=False):
""" Return OrderedDict of unique labels to number.
Optionally sort by label.
"""
labels = Index(map(tuple, labels)).unique().tolist() # squish
if sort_labels:
labels = sorted(list(labels))
d = OrderedDict((k, i) for i, k in enumerate(labels))
return (d)
def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
ilabels = list(zip(*[index._get_level_values(i) for i in subset]))
labels_to_i = _get_label_to_i_dict(ilabels,
sort_labels=sort_labels)
labels_to_i = Series(labels_to_i)
if len(subset) > 1:
labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index)
labels_to_i.index.names = [index.names[i] for i in subset]
else:
labels_to_i.index = Index(x[0] for x in labels_to_i.index)
labels_to_i.index.name = index.names[subset[0]]
labels_to_i.name = 'value'
return (labels_to_i)
labels_to_i = _get_index_subset_to_coord_dict(ss.index, levels,
sort_labels=sort_labels)
# #####################################################################
# #####################################################################
i_coord = labels_to_i[values_ilabels].tolist()
i_labels = labels_to_i.index.tolist()
return i_coord, i_labels
i_coord, i_labels = get_indexers(row_levels)
j_coord, j_labels = get_indexers(column_levels)
return values, i_coord, j_coord, i_labels, j_labels
def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ),
sort_labels=False):
"""
Convert a SparseSeries to a scipy.sparse.coo_matrix using index
levels row_levels, column_levels as the row and column
labels respectively. Returns the sparse_matrix, row and column labels.
"""
import scipy.sparse
if ss.index.nlevels < 2:
        raise ValueError('to_coo requires MultiIndex with nlevels >= 2')
if not ss.index.is_unique:
raise ValueError('Duplicate index entries are not allowed in to_coo '
'transformation.')
# to keep things simple, only rely on integer indexing (not labels)
row_levels = [ss.index._get_level_number(x) for x in row_levels]
column_levels = [ss.index._get_level_number(x) for x in column_levels]
v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels,
column_levels=column_levels,
sort_labels=sort_labels)
sparse_matrix = scipy.sparse.coo_matrix(
(v, (i, j)), shape=(len(rows), len(columns)))
return sparse_matrix, rows, columns
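# Illustrative usage (hypothetical data, not from the pandas test suite):
#   s = pd.Series([3.0, 1.0, 2.0],
#                 index=pd.MultiIndex.from_tuples([(1, 'a'), (1, 'b'), (2, 'a')]))
#   A, rows, cols = _sparse_series_to_coo(s.to_sparse(), row_levels=(0,),
#                                         column_levels=(1,), sort_labels=True)
#   # A is a 2x2 scipy.sparse.coo_matrix; rows/cols hold the index labels used.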
def _coo_to_sparse_series(A, dense_index: bool = False,
sparse_series: bool = True):
"""
Convert a scipy.sparse.coo_matrix to a SparseSeries.
Parameters
----------
A : scipy.sparse.coo.coo_matrix
dense_index : bool, default False
sparse_series : bool, default True
Returns
-------
Series or SparseSeries
Raises
------
TypeError if A is not a coo_matrix
"""
from pandas import SparseDtype
try:
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
except AttributeError:
raise TypeError('Expected coo_matrix. Got {} instead.'
.format(type(A).__name__))
s = s.sort_index()
if sparse_series:
# TODO(SparseSeries): remove this and the sparse_series keyword.
# This is just here to avoid a DeprecationWarning when
# _coo_to_sparse_series is called via Series.sparse.from_coo
s = s.to_sparse() # TODO: specify kind?
else:
s = s.astype(SparseDtype(s.dtype))
if dense_index:
# is there a better constructor method to use here?
i = range(A.shape[0])
j = range(A.shape[1])
ind = MultiIndex.from_product([i, j])
s = s.reindex(ind)
return s
| bsd-3-clause |
tiagoantao/AgeStructureNe | correctTrout.py | 1 | 1771 | from __future__ import division, print_function
import sys
from scipy import stats
from matplotlib import rc
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
import pylab
from trout import *
pref = sys.argv[1]
case = load_file(pref, 45)
def plot_model(fig, model):
bname = get_bname(model)
vals = []
ldnes = {}
errs = {}
labels = []
cnb = case['Newb']
nbks = sorted(list(Nbs.keys()), key=lambda x: x[1])
for cname, cdata in get_corrs(bname, [], []):
ldnes[cname] = []
errs[cname] = []
nobs = 0
for name, N0 in nbks:
if name != model:
continue
nobs += 1
labels.append(str(N0))
val = Nbs[(model, N0)]
ldne, ci = cnb[(model, N0)][None, 50, 100, 'SNP']
for cname, cdata in get_corrs(bname, ldne, ci):
cldne, ccis = cdata
hmean = stats.hmean([x if x > 0 else 10000 for x in cldne])
ldnes[cname].append(hmean)
err = hmean / val
errs[cname].append(err)
vals.append(val)
ax = fig.add_subplot(2, 1, 1)
ax.set_title("Nb and estimators %s" % bname)
ax.plot(vals, '+', label="Nb")
for name, lvals in list(ldnes.items()):
ax.plot(lvals, '-', label=name)
print(name)
print(vals)
print(lvals)
ax.set_xticklabels(labels)
ax.legend()
ax = fig.add_subplot(2, 1, 2)
ax.set_title("Fraction of error %s" % bname)
ax.plot([1.0] * nobs, '+', label="Nb")
for name, cvals in list(errs.items()):
ax.plot(cvals, '-', label=name)
ax.set_xticklabels(labels)
ax.legend()
fig.savefig("output/correct.png")
fig = pylab.figure(figsize=(10, 20))
plot_model(fig, 'bullt2')
| agpl-3.0 |
mjgrav2001/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation of dimensionality from "Automatic Choice of
Dimensionality for PCA" (Thomas P. Minka, NIPS 2000: 598-604) is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
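# Note: cross_val_score falls back to each estimator's own score method here;
# for PCA, FactorAnalysis and the covariance estimators this is the average
# log-likelihood of the held-out data, so higher scores indicate a better fit.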
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
OnePaaS/kafka | system_test/utils/metrics.py | 89 | 13937 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# file: metrics.py
# ===================================
import inspect
import json
import logging
import os
import signal
import subprocess
import sys
import traceback
import csv
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from collections import namedtuple
import numpy
from pyh import *
import kafka_system_test_utils
import system_test_utils
logger = logging.getLogger("namedLogger")
thisClassName = '(metrics)'
d = {'name_of_class': thisClassName}
attributeNameToNameInReportedFileMap = {
'Min': 'min',
'Max': 'max',
'Mean': 'mean',
'50thPercentile': 'median',
'StdDev': 'stddev',
'95thPercentile': '95%',
'99thPercentile': '99%',
'999thPercentile': '99.9%',
'Count': 'count',
'OneMinuteRate': '1 min rate',
'MeanRate': 'mean rate',
'FiveMinuteRate': '5 min rate',
'FifteenMinuteRate': '15 min rate',
'Value': 'value'
}
def getCSVFileNameFromMetricsMbeanName(mbeanName):
return mbeanName.replace(":type=", ".").replace(",name=", ".") + ".csv"
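# Example (illustrative mbean name):
#   "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec"
#   -> "kafka.server.BrokerTopicMetrics.MessagesInPerSec.csv"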
def read_metrics_definition(metricsFile):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
allGraphs = []
for dashboard in allDashboards:
dashboardName = dashboard['name']
graphs = dashboard['graphs']
for graph in graphs:
bean = graph['bean_name']
allGraphs.append(graph)
attributes = graph['attributes']
#print "Filtering on attributes " + attributes
return allGraphs
def get_dashboard_definition(metricsFile, role):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
dashboardsForRole = []
for dashboard in allDashboards:
if dashboard['role'] == role:
dashboardsForRole.append(dashboard)
return dashboardsForRole
def ensure_valid_headers(headers, attributes):
if headers[0] != "# time":
raise Exception("First column should be time")
for header in headers:
logger.debug(header, extra=d)
# there should be exactly one column with a name that matches attributes
try:
attributeColumnIndex = headers.index(attributes)
return attributeColumnIndex
except ValueError as ve:
#print "#### attributes : ", attributes
#print "#### headers : ", headers
raise Exception("There should be exactly one column that matches attribute: {0} in".format(attributes) +
" headers: {0}".format(",".join(headers)))
def plot_graphs(inputCsvFiles, labels, title, xLabel, yLabel, attribute, outputGraphFile):
if not inputCsvFiles: return
# create empty plot
fig=plt.figure()
fig.subplots_adjust(bottom=0.2)
ax=fig.add_subplot(111)
labelx = -0.3 # axes coords
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.grid()
#ax.yaxis.set_label_coords(labelx, 0.5)
Coordinates = namedtuple("Coordinates", 'x y')
plots = []
coordinates = []
# read data for all files, organize by label in a dict
for fileAndLabel in zip(inputCsvFiles, labels):
inputCsvFile = fileAndLabel[0]
label = fileAndLabel[1]
csv_reader = list(csv.reader(open(inputCsvFile, "rb")))
x,y = [],[]
xticks_labels = []
try:
# read first line as the headers
headers = csv_reader.pop(0)
attributeColumnIndex = ensure_valid_headers(headers, attributeNameToNameInReportedFileMap[attribute])
logger.debug("Column index for attribute {0} is {1}".format(attribute, attributeColumnIndex), extra=d)
start_time = (int)(os.path.getctime(inputCsvFile) * 1000)
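            # sanity check: the first data row should start with a numeric
            # timestamp; a ValueError here is caught by the except block below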
int(csv_reader[0][0])
for line in csv_reader:
if(len(line) == 0):
continue
yVal = float(line[attributeColumnIndex])
xVal = int(line[0])
y.append(yVal)
epoch= start_time + int(line[0])
x.append(xVal)
xticks_labels.append(time.strftime("%H:%M:%S", time.localtime(epoch)))
coordinates.append(Coordinates(xVal, yVal))
p1 = ax.plot(x,y)
plots.append(p1)
except Exception as e:
logger.error("ERROR while plotting data for {0}: {1}".format(inputCsvFile, e), extra=d)
traceback.print_exc()
# find xmin, xmax, ymin, ymax from all csv files
xmin = min(map(lambda coord: coord.x, coordinates))
xmax = max(map(lambda coord: coord.x, coordinates))
ymin = min(map(lambda coord: coord.y, coordinates))
ymax = max(map(lambda coord: coord.y, coordinates))
# set x and y axes limits
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# set ticks accordingly
xticks = numpy.arange(xmin, xmax, 0.2*xmax)
# yticks = numpy.arange(ymin, ymax)
plt.xticks(xticks,xticks_labels,rotation=17)
# plt.yticks(yticks)
plt.legend(plots,labels, loc=2)
plt.title(title)
plt.savefig(outputGraphFile)
def draw_all_graphs(metricsDescriptionFile, testcaseEnv, clusterConfig):
# go through each role and plot graphs for the role's metrics
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
dashboards = get_dashboard_definition(metricsDescriptionFile, role)
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
for dashboard in dashboards:
graphs = dashboard['graphs']
# draw each graph for all entities
draw_graph_for_role(graphs, entities, role, testcaseEnv)
def draw_graph_for_role(graphs, entities, role, testcaseEnv):
for graph in graphs:
graphName = graph['graph_name']
yLabel = graph['y_label']
inputCsvFiles = []
graphLegendLabels = []
for entity in entities:
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entity['entity_id'], "metrics")
entityMetricCsvFile = entityMetricsDir + "/" + getCSVFileNameFromMetricsMbeanName(graph['bean_name'])
if(not os.path.exists(entityMetricCsvFile)):
logger.warn("The file {0} does not exist for plotting".format(entityMetricCsvFile), extra=d)
else:
inputCsvFiles.append(entityMetricCsvFile)
graphLegendLabels.append(role + "-" + entity['entity_id'])
# print "Plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
try:
# plot one graph per mbean attribute
labels = graph['y_label'].split(',')
fullyQualifiedAttributeNames = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
attributes = graph['attributes'].split(',')
for labelAndAttribute in zip(labels, fullyQualifiedAttributeNames, attributes):
outputGraphFile = testcaseEnv.testCaseDashboardsDir + "/" + role + "/" + labelAndAttribute[1] + ".svg"
plot_graphs(inputCsvFiles, graphLegendLabels, graph['graph_name'] + '-' + labelAndAttribute[2],
"time", labelAndAttribute[0], labelAndAttribute[2], outputGraphFile)
# print "Finished plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
except Exception as e:
logger.error("ERROR while plotting graph {0}: {1}".format(outputGraphFile, e), extra=d)
traceback.print_exc()
def build_all_dashboards(metricsDefinitionFile, testcaseDashboardsDir, clusterConfig):
metricsHtmlFile = testcaseDashboardsDir + "/metrics.html"
centralDashboard = PyH('Kafka Metrics Dashboard')
centralDashboard << h1('Kafka Metrics Dashboard', cl='center')
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
dashboardPagePath = build_dashboard_for_role(metricsDefinitionFile, role,
entities, testcaseDashboardsDir)
centralDashboard << a(role, href = dashboardPagePath)
centralDashboard << br()
centralDashboard.printOut(metricsHtmlFile)
def build_dashboard_for_role(metricsDefinitionFile, role, entities, testcaseDashboardsDir):
# build all dashboards for the input entity's based on its role. It can be one of kafka, zookeeper, producer
# consumer
dashboards = get_dashboard_definition(metricsDefinitionFile, role)
entityDashboard = PyH('Kafka Metrics Dashboard for ' + role)
entityDashboard << h1('Kafka Metrics Dashboard for ' + role, cl='center')
entityDashboardHtml = testcaseDashboardsDir + "/" + role + "-dashboards.html"
for dashboard in dashboards:
# place the graph svg files in this dashboard
allGraphs = dashboard['graphs']
for graph in allGraphs:
attributes = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
for attribute in attributes:
graphFileLocation = testcaseDashboardsDir + "/" + role + "/" + attribute + ".svg"
entityDashboard << embed(src = graphFileLocation, type = "image/svg+xml")
entityDashboard.printOut(entityDashboardHtml)
return entityDashboardHtml
def start_metrics_collection(jmxHost, jmxPort, role, entityId, systemTestEnv, testcaseEnv):
logger.info("starting metrics collection on jmx port : " + jmxPort, extra=d)
jmxUrl = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi"
clusterConfig = systemTestEnv.clusterEntityConfigDictList
metricsDefinitionFile = systemTestEnv.METRICS_PATHNAME
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "metrics")
dashboardsForRole = get_dashboard_definition(metricsDefinitionFile, role)
mbeansForRole = get_mbeans_for_role(dashboardsForRole)
kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "kafka_home")
javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "java_home")
for mbean in mbeansForRole:
outputCsvFile = entityMetricsDir + "/" + mbean + ".csv"
startMetricsCmdList = ["ssh " + jmxHost,
"'JAVA_HOME=" + javaHome,
"JMX_PORT= " + kafkaHome + "/bin/kafka-run-class.sh kafka.tools.JmxTool",
"--jmx-url " + jmxUrl,
"--object-name " + mbean + " 1> ",
outputCsvFile + " & echo pid:$! > ",
entityMetricsDir + "/entity_pid'"]
startMetricsCommand = " ".join(startMetricsCmdList)
logger.debug("executing command: [" + startMetricsCommand + "]", extra=d)
system_test_utils.async_sys_call(startMetricsCommand)
time.sleep(1)
pidCmdStr = "ssh " + jmxHost + " 'cat " + entityMetricsDir + "/entity_pid' 2> /dev/null"
logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)
# keep track of JMX ppid in a dictionary of entity_id to list of JMX ppid
# testcaseEnv.entityJmxParentPidDict:
# key: entity_id
# val: list of JMX ppid associated to that entity_id
# { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... }
for line in subproc.stdout.readlines():
line = line.rstrip('\n')
logger.debug("line: [" + line + "]", extra=d)
if line.startswith("pid"):
logger.debug("found pid line: [" + line + "]", extra=d)
tokens = line.split(':')
thisPid = tokens[1]
if entityId not in testcaseEnv.entityJmxParentPidDict:
testcaseEnv.entityJmxParentPidDict[entityId] = []
testcaseEnv.entityJmxParentPidDict[entityId].append(thisPid)
#print "\n#### testcaseEnv.entityJmxParentPidDict ", testcaseEnv.entityJmxParentPidDict, "\n"
def stop_metrics_collection(jmxHost, jmxPort):
logger.info("stopping metrics collection on " + jmxHost + ":" + jmxPort, extra=d)
system_test_utils.sys_call("ps -ef | grep JmxTool | grep -v grep | grep " + jmxPort + " | awk '{print $2}' | xargs kill -9")
def get_mbeans_for_role(dashboardsForRole):
graphs = reduce(lambda x,y: x+y, map(lambda dashboard: dashboard['graphs'], dashboardsForRole))
return set(map(lambda metric: metric['bean_name'], graphs))
| apache-2.0 |
AlexanderFabisch/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
iled/gsimcli | GSIMCLI/parsers/costhome.py | 1 | 29601 | # -*- coding: utf-8 -*-
"""
This module provides tools to deal with data files in the COST-HOME format [1]_
See Also
--------
parsers.cost : initial version of this module. All functions will be refactored
to make use of these new classes.
References
----------
.. [1] Venema, V., & Mestre, O. (2010). The File Format for COST-HOME, 1–4.
Created on 28/01/2014
@author: julio
"""
import glob
import os
import re
import warnings
import numpy as np
import pandas as pd
import parsers.cost as pc
import tools.grid as gr
import tools.utils as ut
class Station(object):
"""Station container.
A station is basically a time series of a climate variable in a specific
location.
Attributes
----------
no_data : number
Missing data value.
path : string
File path.
network_id : int
Network ID number.
ftype : {'data', 'breakpoint', 'network', 'other'}
File type\:
- data: contains climate data
- breakpoint: contains detected irregularities
- network: contains stations' names and coordinates
- other: contains other information (e.g., graphics)
status : {'ra', 'qc', 'ho'}
Data file status\:
- ra: raw data
- qc: quality controlled (outliers removed)
- ho: homogenised data
variable : {'dd', 'ff', 'nn', 'tm', 'tn', 'tx', 'pp', 'rr', 'sd'}
Measured climate variable\:
- dd: wind direction
- ff: wind speed
- nn: cloud cover
- tm: mean temperature
- tn: minimum temperature
- tx: maximum temperature
- pp: pressure
- rr: precipitation
- sd: sunshine duration
resolution : {'y', 'm', 'd', 's', 'x'}
Time series resolution (data averaging period)\:
- y: yearly
- m: monthly
- d: daily
- s: subdaily
- x: other
id : int
Station ID number.
content : {'d', 'f', 'g', 'c'}
File content\:
- d: data, meteorological variables
- f: quality flags
- g: graphics and pictures
- c: corrections
Notes
-----
Some methods generate other attributes.
TODO: separate quality flag from data
"""
def __init__(self, path=None, no_data=-999.9, spec=None):
"""Initialise a Station instance.
Parameters
----------
path : string
File path.
spec : list or tuple
Wrapper # TODO: replace with kwargs
no_data : number
Missing data value.
"""
self.no_data = no_data
if path is not None and os.path.isfile(path):
self.path = path
self.network_id = os.path.basename(os.path.dirname(path))
spec = pc.filename_parse(path)
if spec:
(self.ftype, self.status, self.variable, self.resolution,
self.id, self.content) = spec
def check_monthly_order(self):
"""Make sure the data is stored in the correct monthly order.
"""
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
if all(month in months for month in self.data.columns):
self.data = self.data.reindex_axis(months, axis=1)
def load(self, path=None, content=None):
"""Load station data file.
Parameters
----------
path : string
File path.
content : string
File content.
Returns
-------
Sets attributes `data` or `quality`
data : pandas.DataFrame
Measured values.
quality : DataFrame
Quality flags.
Raises
------
ValueError
The given file was not parsed as a `data` file.
"""
if path is not None and os.path.isfile(path) and content:
if content == 'd':
self.data = pc.datafile(path, self.resolution, self.no_data)
elif content == 'f':
self.quality = pc.qualityfile(path, self.resolution)
self.path = path
elif self.ftype != 'data':
raise ValueError('The file {0} was not parsed as a data file.'.
format(self.path))
# FIXME: check if this is a problem; path optional?
elif self.content == 'd' and os.path.isfile(self.path):
self.data = pc.datafile(self.path, self.resolution, self.no_data)
elif self.content == 'f' and os.path.isfile(self.path):
self.quality = pc.qualityfile(self.path, self.resolution)
def load_outliers(self, path=None):
"""List the dates with detected outliers.
Parameters
----------
path : string, optional
Breakpoints file path.
Returns
-------
Sets attribute `outliers`
outliers : pandas.Series
Dates with detected outliers.
Notes
-----
The `breakpoints` file name must end with *detected.txt*.
"""
if path is not None and os.path.isfile(path):
detected_file = path
else:
path = os.path.dirname(self.path)
os.chdir(path)
try:
detected_file = glob.glob('*detected.txt')[0]
except:
raise os.error('breakpoints file not found in directory {0}'.
format(path))
detected = pc.breakpointsfile(detected_file)
select_station = detected["Station"].map(lambda x: self.id in x)
select_outlier = detected["Type"] == "OUTLIE"
self.outliers = detected[select_station & select_outlier].ix[:, 2:]
def match_orig(self, path=None):
"""Try to fetch the matching original station data.
Parameters
----------
path : string, optional
Path to the original station file. If not present, it will look for
a folder named *orig*.
Returns
-------
Sets attribute `orig`
orig : Station object
Instance of Station corresponding to the original station data.
See Also
--------
        match_inho : equivalent but for inhomogeneous data.
match_sub : fetch a matching station in a given submission.
"""
if path:
self.orig = Station(path, self.no_data)
if self.id != self.orig.id:
warnings.warn('mismatch between Station and ORIG IDs')
if self.network_id != self.orig.network_id:
warnings.warn('mismatch between Station and ORIG networks')
else:
self.orig = Station(match_sub(self.path, 'orig'), self.no_data)
def match_inho(self, path=None):
"""Try to fetch the matching inhomogenous station data.
Parameters
----------
path : string, optional
            Path to the inhomogeneous station file. If not present, it will look
for a folder named *inho*.
Returns
-------
Sets attribute `inho`
inho : Station object
            Instance of Station corresponding to the inhomogeneous station data.
See Also
--------
match_orig : equivalent but for original data.
match_sub : fetch a matching station in a given submission.
"""
if path:
self.inho = Station(path, self.no_data)
if self.id != self.inho.id:
warnings.warn('mismatch between Station and INHO IDs')
if self.network_id != self.inho.network_id:
warnings.warn('mismatch between Station and INHO networks')
else:
self.inho = Station(match_sub(self.path, 'inho'), self.no_data)
def yearly(self, func='mean'):
"""Upscale data resolution to yearly.
Parameters
----------
func : {'mean', 'sum'}
- mean: mean of the values
- sum: sum of the values
Returns
-------
ndarray
TODO: check when resolution != monthly
"""
if not hasattr(self, 'data'):
raise ValueError('no loaded data')
if func == 'mean':
return self.data.mean(axis=1)
elif func == 'sum':
return self.data.sum(axis=1)
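    # Illustrative usage (a sketch; assumes `st` is a Station whose monthly
    # data has been loaded with st.load()):
    #   annual_means = st.yearly('mean')   # one averaged value per year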
def setup(self, outliers=False, inho=False, orig_path=None,
inho_path=None):
"""Load station homogenised data, original and outliers.
Parameters
----------
outliers : boolean, default False
Load corresponding outliers.
inho : boolean, default False
            Load corresponding inhomogeneous data.
orig_path : string, optional
Path to the original station file.
inho_path : string, optional
            Path to the inhomogeneous station file.
Returns
-------
Set attributes `outliers`, `orig` and `inho`.
See Also
--------
load : load data.
load_outliers : load outliers.
match_orig : fetch corresponding original data.
match_inho : fetch corresponding inhomogenous.
"""
if not hasattr(self, 'data'):
self.load()
if not hasattr(self, 'orig'):
self.match_orig(orig_path)
self.orig.load()
if outliers and not hasattr(self, 'outliers'):
if 'orig' not in self.path.lower():
warnings.warn('loading outliers from non ORIG submission')
self.load_outliers()
if inho and not hasattr(self, 'inho'):
self.match_inho(inho_path)
def save(self, path):
"""Write station data in the COST-HOME format (tab separated values,
float numbers with one decimal value).
Parameters
----------
path : string
File path.
"""
self.path = path
self.load()
filename = (self.status + self.variable + self.resolution + self.id +
self.content + '.txt')
self.check_monthly_order()
self.data.to_csv(os.path.join(path, filename), sep='\t', header=False,
float_format='%6.1f')
def skip_outliers(self, yearly=True):
"""Replaced by NaN the values marked as outliers in the original data.
If working with yearly data, it will delete the corresponding rows
instead.
"""
self.orig.load_outliers()
orig = self.orig.data
if yearly:
skip = list(np.unique(self.orig.outliers.Year))
orig = orig.select(lambda x: x not in skip)
else:
skip = self.orig.outliers
skip['Month'] = ut.number_to_month(skip['Month'])
for date in skip.itertuples(index=False):
orig.loc[date] = np.nan
class Network(object):
"""Network container.
A network is a set of stations. The same station can belong to different
networks.
Attributes
----------
no_data : number
Missing data value.
path : string
Network folder path.
id : int
Network ID number.
stations_id : list of int
Stations' ID numbers.
stations_spec : list of list
List wrapping a set of Station attributes.
stations_path : list of string
Stations file paths.
stations_number : int
Number of stations in the network.
"""
def __init__(self, path=None, no_data=-999.9, network_id=None):
"""Initialise a Network instance.
Parameters
----------
path : string, optional
Network folder path.
no_data : number, default -999.9
Missing data value.
network_id : int, optional
Network ID number.
Notes
-----
The current implementation is filtering files parsed as `data` type and
with content `d`.
TODO: handle other files besides data?
"""
self.no_data = no_data
self.path = path
self.id = network_id
self.stations_id = list()
self.stations_spec = list()
self.stations_path = list()
self.stations_number = 0
if path:
if ((isinstance(path, str) or isinstance(path, unicode)) and
os.path.isdir(path)):
parsed = pc.directory_walk_v1(path)
selected = pc.files_select(parsed, ftype='data', content='d')
else:
selected = path
self.id = selected[0][1][0]
self.stations_number = len(selected)
for station in selected:
self.stations_id.append(station[1][5])
self.stations_spec.append(station[1])
self.stations_path.append(station[0])
def load_stations(self):
"""Load all the stations in the network.
Notice that the data has to be explicitly loaded, the stations are just
being indexed to the network.
"""
self.stations = list()
for station in self.stations_path:
self.stations.append(Station(station, self.no_data))
def add(self, station):
"""Add a station to the network.
Parameters
----------
station : Station object
Instance of Station representing the station to add to the network.
"""
if not hasattr(self, 'stations'):
self.stations = list()
self.stations.append(station)
self.stations_id.append(station.id)
self.stations_spec.append([station.ftype, station.status,
station.variable, station.resolution,
station.id, station.content])
self.stations_path.append(station.path)
self.stations_number += 1
def average(self, orig=False, yearly=True):
"""Calculate the average climate variable value per year of all
stations in the network.
Parameters
----------
orig : boolean, default False
Calculate the same average for the corresponding original data.
yearly : boolean, default True
Average monthly data to yearly data.
Returns
-------
ndarray or list of ndarray
"""
self.setup()
first = True
for station in self.stations:
# station.setup()
if yearly:
homog_data = station.data.mean(axis=1)
if orig:
orig_data = station.orig.data.mean(axis=1)
else:
homog_data = station.data.copy()
if orig:
orig_data = station.orig.data.copy()
if first:
netw_average = homog_data.copy()
if orig:
orig_average = orig_data.copy()
first = False
continue
netw_average += homog_data
if orig:
# this will preserve missing data
orig_average += orig_data
netw_result = netw_average / self.stations_number
if orig:
result = [netw_result, orig_average / self.stations_number]
else:
result = netw_result
return result
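    # Illustrative usage (a sketch; assumes `netw` is a Network whose stations
    # have their data loaded):
    #   homog_avg, orig_avg = netw.average(orig=True, yearly=True)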
def skip_outliers(self, orig_path=None, yearly=True):
"""Opt out the values marked as outliers in the original data, in each
station.
Parameters
----------
orig_path : string, optional
Path to the original station file.
yearly : boolean, default True
Average monthly data to yearly data.
"""
self.setup()
for station in self.stations:
station.setup(orig_path=orig_path)
station.skip_outliers(yearly)
def setup(self):
"""Load all stations in the network.
No option to load from a non default path.
"""
if not hasattr(self, 'stations'):
self.load_stations()
def save(self, path):
"""Write every station in the network according to the COST-HOME
format.
Parameters
----------
path : string
Folder path.
TODO: write network and breakpoints files.
"""
self.setup()
path = os.path.join(path, str(self.id))
if not os.path.exists(path):
os.mkdir(path)
for station in self.stations:
station.save(path)
def load_gsimcli(self, path, keys_path=None, ftype='data', status='xx',
variable='rr', resolution='r', content='c', yearly=True,
yearly_sum=False):
"""Load stations data from a file in the gsimcli format.
"""
if not yearly:
month = extract_month(path)
if yearly and yearly_sum:
div = 12.0
else:
div = 1.0
xlsfile = pd.ExcelFile(path)
xlstable = xlsfile.parse(sheetname='All stations', header=0,
na_values=self.no_data, index_col=0)
# filter out FLAG columns
data_cols = [label for label in xlstable.columns if '_clim' in label]
st_labels = [label.split('_')[0] for label in data_cols]
# convert station ID keys
if keys_path is not None and os.path.isfile(keys_path):
self.load_keys(keys_path)
station_ids = [str(self.keys.loc[int(stid)].values[0])
for stid in st_labels]
else:
station_ids = st_labels
for i, station_col in enumerate(data_cols):
stid = station_ids[i]
data = pd.DataFrame(xlstable[station_col] / div)
if not yearly:
data.columns = [month]
if stid in self.stations_id:
st = self.station(stid)
st.data = st.data.join(data)
else:
st = Station(no_data=self.no_data)
st.path = None
st.network_id = self.id
st.ftype = ftype
st.status = status
st.variable = variable
st.resolution = resolution
st.content = content
st.id = stid
st.data = data
self.add(st)
def load_keys(self, path):
"""Read a TSV file with the keys to the converted station IDs.
Parameters
----------
path : string
File path.
"""
self.keys = pd.read_csv(path, sep='\t', index_col=0)
def load_pointset(self, path, header=True, ftype='data', status='xx',
variable='vv', resolution='r', content='c',
year_col='year', station_col='est_id', var_col='value'):
"""Load station data from a file in the GSLIB format.
Parameters
----------
path : string or PointSet object
Full path to the PointSet file or instance of PointSet type
containing the observed values at the candidate station.
header : boolean, default True
True if the PointSet file has the GSLIB standard header lines.
ftype : {'data', 'breakpoint', 'network', 'other'}
File type.
status : {'ra', 'qc', 'ho'}
Data file status, default 'xx' (placeholder).
variable : {'dd', 'ff', 'nn', 'tm', 'tn', 'tx', 'pp', 'rr', 'sd'}
Measured climate variable, default 'vv' (placeholder).
resolution : {'y', 'm', 'd', 's', 'x'}
Time series resolution (data averaging period), default 'r'
(placeholder).
content : {'d', 'f', 'g', 'c'}
File content, default 'c' (placeholder).
year_col : string, default 'year'
Label of the column containing the time series yearly index.
station_col : string, default 'est_id'
Label of the column containing the stations' ID's.
var_col : string, default 'value'
Label of the column containing the climate data values.
See Also
--------
Station : Station class.
"""
if isinstance(path, gr.PointSet):
pset = path
else:
pset = gr.PointSet(psetpath=path, header=header)
pset.values.rename(columns={year_col: 'time', station_col:
'station', var_col: 'clim'},
inplace=True)
index = pset.values.time.unique().astype('int')
self.stations_id = list(pset.values.station.unique().astype('int'))
self.stations_number = len(self.stations_id)
self.stations = list()
for station_id in self.stations_id:
st_data = pd.Series(pset.values.clim
[pset.values.station == station_id].values,
index, name=variable)
st = Station(no_data=self.no_data)
st.data = st_data
st.id = format(station_id, '0=8.0f')
st.ftype = ftype
st.status = status
st.variable = variable
st.resolution = resolution
st.content = content
st.network_id = self.id
self.stations.append(st)
def station(self, stid):
"""Return the existing Station instance with the given ``stid`` ID.
"""
for st in self.stations:
if st.id == stid:
return st
def update_ids(self, keys_path=None):
"""Update every station ID according to the given keys.
Useful when stations' ID's were replaced with a different number (for
        instance, because they were non-numerical).
Parameters
----------
keys_path : string or pandas.Series, optional
File path or Series containing ID's and the corresponding keys.
"""
if keys_path is not None and os.path.isfile(keys_path):
self.load_keys(keys_path)
for i, station in enumerate(self.stations):
station.id = self.keys.loc[station.id]
self.stations_id[i] = station.id
class Submission(object):
"""Submission/Contribution to the COST-HOME benchark.
Each instance of Submission should refer to a unique climate signal
(temperature or precipitation).
Attributes
----------
path : string
Folder path.
no_data : number
Missing data value.
name : string
Submission's name.
signal : string
Submission's climate signal.
networks : list of Network object
Networks contained in the submission.
networks_id : list of int
Network ID numbers contained in the submission.
stations_number : int
Total number of station contained in the submission.
stations_id : list of int
Unique station ID numbers contained in the submission. ID's relative to
stations in different networks but with the same number count as one.
orig_path : string
Directory where the original station files are located.
inho_path : string
Directory where the inhomogeneous station files are located.
"""
def __init__(self, path=None, no_data=-999.9, networks_id=None,
orig_path=None, inho_path=None):
"""Initialise a Submission instance.
Parameters
----------
path : string, optional
Folder path.
no_data : number, default -999.9
Missing data value.
networks_id : list of int, optional
Network ID numbers contained in the submission.
orig_path : string, optional
Directory where the original station files are located.
inho_path : string, optional
Directory where the inhomogeneous station files are located.
Notes
-----
The current implementation is filtering files parsed as `data` type and
with content `d`.
"""
self.no_data = no_data
self.networks = list()
self.networks_id = list()
self.stations_number = 0
self.stations_id = list()
if path is not None:
self.load_dir(path, networks_id)
self.orig_path = orig_path
self.inho_path = inho_path
def add(self, network):
"""Add a network to the submission.
Parameters
----------
network : Network instance
Network to be added to the submission.
"""
self.networks.append(network)
self.networks_id.append(network.id)
self.stations_number += network.stations_number
self.stations_id.extend(network.stations_id)
self.stations_id = list(set(self.stations_id))
def load(self):
"""Load all networks included in the submission.
"""
for network in self.networks:
network.setup()
def load_dir(self, path, networks_id=None):
"""Load submission from a directory containing all the included
networks, one per folder. The data files should be in the COST-HOME
format.
Parameters
----------
path : string
Folder path.
networks_id : list of int, optional
Network ID numbers contained in the submission.
"""
self.path = path
self.name = os.path.basename(os.path.dirname(path))
self.signal = os.path.basename(path)
parsed = pc.directory_walk_v1(path)
selected = pc.files_select(parsed, network=networks_id,
ftype='data', content='d')
grouped = pc.agg_network(selected)
for network in grouped:
self.networks_id.append(network[0][1][0])
self.networks.append(Network(network, self.no_data))
self.stations_number += self.networks[-1].stations_number
self.stations_id.extend(self.networks[-1].stations_id)
self.stations_id = list(np.unique(self.stations_id))
def save(self, path):
"""Write all networks included in the submission, according to the
COST-HOME format.
Parameters
----------
path : string
Folder path.
"""
for network in self.networks:
network.save(path)
def setup(self, orig_path=None, inho_path=None):
"""Load all networks (and its stations) in the submission.
Set the original and the inhomogenised directories. Each one these
should contain a folder for each network in the submission, where each
network contains several stations.
Parameters
----------
orig_path : string, optional
Directory with the original files for the submission.
inho_path : string, optional
            Directory with the inhomogeneous files for the submission.
"""
self.load()
if self.orig_path:
orig_path = self.orig_path
else:
self.orig_path = orig_path
if self.inho_path:
inho_path = self.inho_path
else:
self.inho_path = inho_path
if orig_path or inho_path:
for network in self.networks:
if orig_path:
orig_netw = os.path.join(orig_path, network.id)
if inho_path:
inho_netw = os.path.join(inho_path, network.id)
for station in network.stations:
# find file by id
file_pattern = os.sep + '*' + station.id + '*'
if orig_path:
orig_file = glob.glob(orig_netw + file_pattern)[0]
station.match_orig(orig_file)
if inho_path:
inho_file = glob.glob(inho_netw + file_pattern)[0]
station.match_inho(inho_file)
station.setup()
def extract_month(path):
"""Try to guess the month of a monthly gsimcli results file.
    Recognizes month name abbreviations in the file name (e.g., apr, oct).
"""
months = set(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
filename = os.path.splitext(os.path.basename(path))[0]
names = re.split('\W+|_', filename)
names = {name.capitalize() for name in names}
return list(months & names)[0]
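# Illustrative example (hypothetical file name):
#   extract_month('gsimcli_results_apr.xls')  # -> 'Apr'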
def match_sub(path, sub, level=3):
"""Try to fetch the matching `sub` station in a given submission.
Parameters
----------
path : string
Station file path.
sub : string
Intended corresponding station.
level : int, default 3
Number of levels in the directory tree to go up.
Returns
-------
match : string
Path to the matching station.
"""
subpath, signalpath = ut.path_up(path, level)
benchpath, subm = os.path.split(subpath) # @UnusedVariable
match = os.path.join(benchpath, sub, signalpath)
if not os.path.exists(match):
# try to match by station id
dirname, basename = os.path.split(match)
os.chdir(dirname)
match = os.path.join(dirname, glob.glob('*' + str(basename[2:]))[0])
if not os.path.isfile(match):
raise os.error('no such file: \'{0}\''.format(match))
return match
| gpl-3.0 |
carrillo/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three example classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier`
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
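# With voting='soft', the ensemble averages the classifiers' predicted class
# probabilities using the weights above (2:1:2) and predicts the class with
# the highest weighted average probability.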
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
cogeorg/BlackRhino | examples/Georg2012/production/networkx/readwrite/tests/test_gml.py | 35 | 3099 | #!/usr/bin/env python
import io
from nose.tools import *
from nose import SkipTest
import networkx
class TestGraph(object):
@classmethod
def setupClass(cls):
global pyparsing
try:
import pyparsing
except ImportError:
try:
import matplotlib.pyparsing as pyparsing
except:
raise SkipTest('gml test: pyparsing not available.')
def setUp(self):
self.simple_data="""Creator me
graph [
comment "This is a sample graph"
directed 1
IsPlanar 1
pos [ x 0 y 1 ]
node [
id 1
label "Node 1"
pos [ x 1 y 1 ]
]
node [
id 2
pos [ x 1 y 2 ]
label "Node 2"
]
node [
id 3
label "Node 3"
pos [ x 1 y 3 ]
]
edge [
source 1
target 2
label "Edge from node 1 to node 2"
color [line "blue" thickness 3]
]
edge [
source 2
target 3
label "Edge from node 2 to node 3"
]
edge [
source 3
target 1 label
"Edge from node 3 to node 1"
]
]
"""
def test_parse_gml(self):
G=networkx.parse_gml(self.simple_data,relabel=True)
assert_equals(sorted(G.nodes()),\
['Node 1', 'Node 2', 'Node 3'])
assert_equals( [e for e in sorted(G.edges())],\
[('Node 1', 'Node 2'),
('Node 2', 'Node 3'),
('Node 3', 'Node 1')])
assert_equals( [e for e in sorted(G.edges(data=True))],\
[('Node 1', 'Node 2',
{'color': {'line': 'blue', 'thickness': 3},
'label': 'Edge from node 1 to node 2'}),
('Node 2', 'Node 3',
{'label': 'Edge from node 2 to node 3'}),
('Node 3', 'Node 1',
{'label': 'Edge from node 3 to node 1'})])
def test_read_gml(self):
import os,tempfile
(fd,fname)=tempfile.mkstemp()
fh=open(fname,'w')
fh.write(self.simple_data)
fh.close()
Gin=networkx.read_gml(fname,relabel=True)
G=networkx.parse_gml(self.simple_data,relabel=True)
assert_equals( sorted(G.nodes(data=True)), sorted(Gin.nodes(data=True)))
assert_equals( sorted(G.edges(data=True)), sorted(Gin.edges(data=True)))
os.close(fd)
os.unlink(fname)
def test_relabel_duplicate(self):
data="""
graph
[
label ""
directed 1
node
[
id 0
label "same"
]
node
[
id 1
label "same"
]
]
"""
fh = io.BytesIO(data.encode('UTF-8'))
fh.seek(0)
assert_raises(networkx.NetworkXError,networkx.read_gml,fh,relabel=True)
def test_bool(self):
G=networkx.Graph()
G.add_node(1,on=True)
G.add_edge(1,2,on=False)
data = '\n'.join(list(networkx.generate_gml(G)))
answer ="""graph [
node [
id 0
label 1
on 1
]
node [
id 1
label 2
]
edge [
source 0
target 1
on 0
]
]"""
assert_equal(data,answer)
| gpl-3.0 |
DonBeo/statsmodels | statsmodels/regression/_prediction.py | 27 | 6035 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 19 11:29:18 2014
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
# this is similar to ContrastResults after t_test, partially copied and adjusted
class PredictionResults(object):
def __init__(self, predicted_mean, var_pred_mean, var_resid,
df=None, dist=None, row_labels=None):
self.predicted_mean = predicted_mean
self.var_pred_mean = var_pred_mean
self.df = df
self.var_resid = var_resid
self.row_labels = row_labels
if dist is None or dist == 'norm':
self.dist = stats.norm
self.dist_args = ()
elif dist == 't':
self.dist = stats.t
self.dist_args = (self.df,)
else:
self.dist = dist
self.dist_args = ()
@property
def se_obs(self):
return np.sqrt(self.var_pred_mean + self.var_resid)
@property
def se_mean(self):
return np.sqrt(self.var_pred_mean)
def conf_int(self, obs=False, alpha=0.05):
"""
        Returns the confidence interval for the predicted mean or for new
        observations.
        Parameters
        ----------
        obs : bool, optional
            If True, the interval includes the residual variance and is a
            prediction interval for new observations. If False (default),
            it is a confidence interval for the predicted mean.
        alpha : float, optional
            The significance level for the confidence interval.
            ie., The default `alpha` = .05 returns a 95% confidence interval.
        Returns
        -------
        ci : ndarray, (n_predictions, 2)
            The array has the lower and the upper limit of the confidence
            interval in the columns.
"""
se = self.se_obs if obs else self.se_mean
q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
lower = self.predicted_mean - q * se
upper = self.predicted_mean + q * se
return np.column_stack((lower, upper))
def summary_frame(self, what='all', alpha=0.05):
# TODO: finish and cleanup
import pandas as pd
from statsmodels.compat.collections import OrderedDict
ci_obs = self.conf_int(alpha=alpha, obs=True) # need to split
ci_mean = self.conf_int(alpha=alpha, obs=False)
to_include = OrderedDict()
to_include['mean'] = self.predicted_mean
to_include['mean_se'] = self.se_mean
to_include['mean_ci_lower'] = ci_mean[:, 0]
to_include['mean_ci_upper'] = ci_mean[:, 1]
to_include['obs_ci_lower'] = ci_obs[:, 0]
to_include['obs_ci_upper'] = ci_obs[:, 1]
self.table = to_include
#OrderedDict doesn't work to preserve sequence
# pandas dict doesn't handle 2d_array
#data = np.column_stack(list(to_include.values()))
#names = ....
res = pd.DataFrame(to_include, index=self.row_labels,
columns=to_include.keys())
return res
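# Illustrative usage sketch (assumes `res` is a fitted regression results
# instance to which get_prediction is attached, and `x_new` holds new
# explanatory values):
#   pred = res.get_prediction(x_new)
#   pred.conf_int(obs=True, alpha=0.05)  # prediction interval for new obs
#   pred.summary_frame()                 # mean and obs intervals as a DataFrame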
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, pred_kwds=None):
"""
compute prediction results
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
weights : array_like, optional
Weights interpreted as in WLS, used for the variance of the predicted
residual.
        pred_kwds : dict, optional
            Additional keyword arguments passed through to the model's predict
            method, see the predict method of the model for the details.
Returns
-------
prediction_results : instance
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations.
"""
### prepare exog and row_labels, based on base Results.predict
if transform and hasattr(self.model, 'formula') and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.design_info.builder,
exog)
if exog is not None:
if row_labels is None:
if hasattr(exog, 'index'):
row_labels = exog.index
else:
row_labels = None
exog = np.asarray(exog)
if exog.ndim == 1 and (self.model.exog.ndim == 1 or
self.model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
else:
exog = self.model.exog
if weights is None:
weights = getattr(self.model, 'weights', None)
if row_labels is None:
row_labels = getattr(self.model.data, 'row_labels', None)
# need to handle other arrays, TODO: is delegating to model possible ?
if weights is not None:
weights = np.asarray(weights)
if (weights.size > 1 and
(weights.ndim != 1 or weights.shape[0] == exog.shape[1])):
raise ValueError('weights has wrong shape')
### end
if pred_kwds is None:
pred_kwds = {}
predicted_mean = self.model.predict(self.params, exog, **pred_kwds)
covb = self.cov_params()
var_pred_mean = (exog * np.dot(covb, exog.T).T).sum(1)
# TODO: check that we have correct scale, Refactor scale #???
var_resid = self.scale / weights # self.mse_resid / weights
# special case for now:
if self.cov_type == 'fixed scale':
var_resid = self.cov_kwds['scale'] / weights
dist = ['norm', 't'][self.use_t]
return PredictionResults(predicted_mean, var_pred_mean, var_resid,
df=self.df_resid, dist=dist,
row_labels=row_labels)
| bsd-3-clause |
walterreade/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
aosingh/Regularization | Lp/LearningStability3.py | 1 | 1692 | import numpy as np
from LpTester import start_lp_regression
from sklearn.datasets.samples_generator import make_regression
from sklearn.model_selection import train_test_split
from pprint import pprint
import random
# Define synthetic data-set constants. Change this to experiment with different data sets
NUM_OF_SAMPLES = 2000
NUM_OF_FEATURES = 2
NOISE = 10
# Define the number of iterations and learning rate for Linear regression.
NUM_OF_ITERATIONS = 2000
LEARNING_RATE = 0.01
# generate sample data-set using the following function.
x, y = make_regression(n_samples=NUM_OF_SAMPLES,
n_features=NUM_OF_FEATURES,
n_informative=1,
noise=NOISE)
# Add a columns of 1s as bias(intercept) in the training records
x = np.c_[np.ones(x.shape[0]), x]
print np.shape(x)
print np.shape(y)
weights = []
for i in range(0,100):
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.5, random_state=int(random.uniform(1, 76)))
print np.shape(x_train)
print np.shape(y_train)
weight_table, MSEcost = start_lp_regression(x_train,y_train)
weights.append(weight_table[-1]);
pprint(weights[1])
weights1 = [rows[0] for rows in weights]
weights2 = [rows[1] for rows in weights]
weights3 = [rows[2] for rows in weights]
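# The spread (standard deviation) of each learned weight across the 100 random
# train/test splits is used below as a measure of learning stability.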
variance1 = np.std(weights1)
variance2 = np.std(weights2)
variance3 = np.std(weights3)
mean1 = np.mean(weights1)
mean2 = np.mean(weights2)
mean3 = np.mean(weights3)
print "Stability of the Lp Regularizer 100 iterations are = {:2e}(+/-{:.2e}), {:2e}(+/-{:2e}), {:2e}(+/-{:2e}))".format(mean1, variance1, mean2, variance2, mean3, variance3)
| mit |
cavestruz/StrongCNN | models/plot_fits.py | 1 | 2688 | import sys, os, ast
import matplotlib.pyplot as plt
from StrongCNN.IO.load_images import load_images
from StrongCNN.IO.config_parser import parse_configfile
from matplotlib.colors import LogNorm
from StrongCNN.utils.read_model_out import read_failed_ids
from skimage.feature import hog
def read_hog_kwargs(modeldir) :
cfg = parse_configfile(modeldir)
hog_params = {k.split('hog__')[1]: ast.literal_eval(v) for \
k, v in cfg['param_grid'].iteritems() \
if k.startswith('hog') }
return hog_params
def data2plot( fitsfiles, name ) :
fitsdata = load_images(fitsfiles)
nrows = len(fitsfiles)/2
return name, nrows, fitsdata, [hog(fd, visualise=True, **read_hog_kwargs(modeldir)) for fd in fitsdata]
def multiplot( nrows ) :
fig, axes = plt.subplots(nrows=nrows, ncols=2, figsize=(12, nrows*6),
subplot_kw={'xticks':[],'yticks':[]})
fig.subplots_adjust(hspace=0.1,wspace=0.05)
return fig, axes
def multi_imshow( name, nrows, fitsdata, hogdata ) :
fig, axes = multiplot(nrows)
for ax, d in zip(axes.flat,fitsdata) :
im = ax.imshow(d, cmap='gray', norm=LogNorm())
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
# fig.colorbar(im, cax=cbar_ax)
plt.savefig(imagedir+name+'.pdf')
def multi_hogvisualization( name, nrows, fitsdata, hogdata ) :
fig, axes = multiplot(nrows)
for ax, d in zip(axes.flat, hogdata) :
im = ax.imshow(d[1], cmap='gray', norm=LogNorm())
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
# fig.colorbar(im, cax=cbar_ax)
plt.savefig(imagedir+name+'_hogvisualization.pdf')
def multi_hoghistogram( name, nrows, fitsdata, hogdata ) :
fig, axes = multiplot(nrows)
for ax, d in zip(axes.flat, hogdata) :
im = ax.plot(d[0])
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
# fig.colorbar(im, cax=cbar_ax)
plt.savefig(imagedir+name+'_hoghistogram.pdf')
modeldir = sys.argv[1]
imagedir = modeldir+'/images/'
if not os.path.exists(imagedir) : os.mkdir(imagedir)
# Should return a list of all failed ids
failed_ids = read_failed_ids(modeldir)
lensed_failed_ids = [ fid for fid in failed_ids if 'unlensed' not in fid ]
unlensed_failed_ids = [ fid for fid in failed_ids if 'unlensed' in fid ]
d2p = data2plot(lensed_failed_ids, 'lensed_failed')
multi_imshow(*d2p)
multi_hogvisualization(*d2p)
multi_hoghistogram(*d2p)
d2p = data2plot(unlensed_failed_ids, 'unlensed_failed')
multi_imshow(*d2p)
multi_hogvisualization(*d2p)
multi_hoghistogram(*d2p)
| mit |
fsimkovic/conkit | conkit/plot/contactmapmatrix.py | 1 | 7662 | # BSD 3-Clause License
#
# Copyright (c) 2016-18, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A module to produce a contact map plot"""
from __future__ import division
from __future__ import print_function
__author__ = "Felix Simkovic"
__date__ = "10 Jan 2018"
__version__ = "1.0"
import matplotlib.collections as mcoll
import matplotlib.pyplot as plt
import numpy as np
from conkit.core.struct import Gap
from conkit.misc import normalize
from conkit.plot.figure import Figure
from conkit.plot.tools import ColorDefinitions, _isinstance
class ContactMapMatrixFigure(Figure):
"""A Figure object specifically for a :obj:`~conkit.core.contactmap.ContactMap`
This figure will illustrate the contacts in a contact
map matrix. This plot is a very common representation of contacts.
    With this figure, you can illustrate your contact map by itself,
    compare it against a second contact map, and/or match it against
    contacts extracted from another contact map.
Attributes
----------
hierarchy : :obj:`~conkit.core.contactmap.ContactMap`
The default contact map hierarchy
other : :obj:`~conkit.core.contactmap.ContactMap`
The second contact map hierarchy
altloc : bool
Use the :attr:`~conkit.core.contact.Contact.res_altloc` positions [default: False]
Examples
--------
>>> import conkit
>>> cmap = conkit.io.read('toxd/toxd.mat', 'ccmpred').top_map
>>> conkit.plot.ContactMapMatrixFigure(cmap)
"""
def __init__(self, hierarchy, other=None, altloc=False, lim=None, **kwargs):
"""A new contact map plot
Parameters
----------
hierarchy : :obj:`~conkit.core.contactmap.ContactMap`
The default contact map hierarchy
other : :obj:`~conkit.core.contactmap.ContactMap`, optional
The second contact map hierarchy
altloc : bool, optional
Use the :attr:`~conkit.core.contact.Contact.res_altloc` positions [default: False]
lim : tuple, list, optional
The [min, max] residue numbers to show
**kwargs
General :obj:`~conkit.plot.figure.Figure` keyword arguments
"""
super(ContactMapMatrixFigure, self).__init__(**kwargs)
self._hierarchy = None
self._other = None
self._lim = None
self.altloc = altloc
self.hierarchy = hierarchy
if other:
self.other = other
if lim:
self.lim = lim
self.draw()
def __repr__(self):
return self.__class__.__name__
@property
def hierarchy(self):
return self._hierarchy
@hierarchy.setter
def hierarchy(self, hierarchy):
if hierarchy and _isinstance(hierarchy, "ContactMap"):
self._hierarchy = hierarchy
else:
raise TypeError("Invalid hierarchy type: %s" % hierarchy.__class__.__name__)
@property
def other(self):
return self._other
@other.setter
def other(self, hierarchy):
if hierarchy and _isinstance(hierarchy, "ContactMap"):
self._other = hierarchy
else:
raise TypeError("Invalid hierarchy type: %s" % hierarchy.__class__.__name__)
@property
def lim(self):
return self._lim
@lim.setter
def lim(self, lim):
if isinstance(lim, (list, tuple)) and len(lim) == 2:
self._lim = lim
elif isinstance(lim, (list, tuple)):
raise ValueError("A list with 2 entries is required!")
else:
raise TypeError("A list with [min, max] limits is required!")
def draw(self):
_hierarchy = self._hierarchy.rescale()
self_data = np.array([c for c in _hierarchy.as_list() if all(ci != Gap.IDENTIFIER for ci in c)])
self_colors = ContactMapMatrixFigure._determine_color(_hierarchy)
self_rawsc = np.array(
[c.raw_score for c in _hierarchy if all(ci != Gap.IDENTIFIER for ci in [c.res1_seq, c.res2_seq])])
if self._other:
_other = self._other.rescale()
            other_data = np.array([c for c in _other.as_list() if all(ci != Gap.IDENTIFIER for ci in c)])
other_colors = ContactMapMatrixFigure._determine_color(_other)
other_rawsc = np.array(
[c.raw_score for c in _other if all(ci != Gap.IDENTIFIER for ci in [c.res1_seq, c.res2_seq])])
else:
other_data = self_data
other_colors = self_colors
other_rawsc = self_rawsc
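        # The two scatter calls below mirror the data across the diagonal:
        # the first plots this map's contacts with (x=res2, y=res1) in one
        # triangle, the second plots the reference map (or the same map again
        # when no other map is given) transposed into the other triangle.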
self._patch_scatter(
self_data[:, 1], self_data[:, 0], symbol="s", facecolor=self_colors, radius=1.0, linewidth=0)
self._patch_scatter(
other_data[:, 0], other_data[:, 1], symbol="s", facecolor=other_colors, radius=1.0, linewidth=0)
if self.lim:
min_max_data = np.arange(self.lim[0], self.lim[1] + 1)
self.ax.set_xlim(self.lim[0] - 0.5, self.lim[1] + 0.5)
self.ax.set_ylim(self.lim[0] - 0.5, self.lim[1] + 0.5)
else:
min_max_data = np.append(self_data[:, 0], self_data[:, 1])
min_max_data = np.append(min_max_data, other_data[:, 0])
min_max_data = np.append(min_max_data, other_data[:, 1])
self.ax.set_xlim(min_max_data.min(), min_max_data.max() + 1.)
self.ax.set_ylim(min_max_data.min(), min_max_data.max() + 1.)
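        # Space the axis ticks at roughly 10% of the residue range (at least 1).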
gap = 10 * (min_max_data.max() - min_max_data.min()) // 100
if gap < 1:
gap = 1
tick_range = np.arange(min_max_data.min(), min_max_data.max(), gap, dtype=np.int64)
self.ax.set_xticks(tick_range + 0.5)
self.ax.set_xticklabels(tick_range)
self.ax.set_yticks(tick_range + 0.5)
self.ax.set_yticklabels(tick_range)
self.ax.set_xlabel('Residue number')
self.ax.set_ylabel('Residue number')
# TODO: deprecate this in 0.10
if self._file_name:
self.savefig(self._file_name, dpi=self._dpi)
@staticmethod
def _determine_color(h):
"""Determine the color of the contacts in order"""
greys = plt.get_cmap("Greys")
return [greys(contact.raw_score) for contact in h]
| bsd-3-clause |
mtconley/turntable | test/lib/python2.7/site-packages/scipy/cluster/tests/test_hierarchy.py | 7 | 34863 | #! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, run_module_suite, dec, assert_raises,
assert_allclose, assert_equal, assert_)
from scipy.lib.six import xrange, u
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
linkage, from_mlab_linkage, to_mlab_linkage, num_obs_linkage, inconsistent,
cophenet, fclusterdata, fcluster, is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram)
from scipy.spatial.distance import pdist
import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
# import matplotlib
import matplotlib
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
class TestLinkage(object):
def test_linkage_empty_distance_matrix(self):
# Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
################### linkage
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted', u('single')]:
yield self.check_linkage_tdist, method
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
################### linkage on Q
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
yield self.check_linkage_q, method
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent(object):
def test_inconsistent_tdist(self):
for depth in hierarchy_test_data.inconsistent_ytdist:
yield self.check_inconsistent_tdist, depth
def check_inconsistent_tdist(self, depth):
Z = hierarchy_test_data.linkage_ytdist_single
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance(object):
def test_linkage_cophenet_tdist_Z(self):
# Tests cophenet(Z) on tdist data set.
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = hierarchy_test_data.linkage_ytdist_single
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
def test_linkage_cophenet_tdist_Z_Y(self):
# Tests cophenet(Z, Y) on tdist data set.
Z = hierarchy_test_data.linkage_ytdist_single
(c, M) = cophenet(Z, hierarchy_test_data.ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion(object):
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
def test_mlab_linkage_conversion_single_row(self):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = np.asarray([[0., 1., 3., 2.]])
Zm = [[1, 2, 3]]
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
def test_mlab_linkage_conversion_multiple_rows(self):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=np.double)
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster(object):
def test_fclusterdata(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fclusterdata, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fclusterdata, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fclusterdata, t, 'maxclust'
def check_fclusterdata(self, t, criterion):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
X = hierarchy_test_data.Q_X
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fcluster, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster, t, 'maxclust'
def check_fcluster(self, t, criterion):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster_monocrit(self):
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster_monocrit, t
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster_maxclust_monocrit, t
def check_fcluster_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_distance[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_maxclust[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders(object):
def test_leaders_single(self):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
assert_equal(L, Lright)
class TestIsIsomorphic(object):
def test_is_isomorphic_1(self):
# Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = [1, 7, 1]
b = [2, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = []
b = []
assert_(is_isomorphic(a, b))
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = [1, 2, 3]
b = [1, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
assert_(is_isomorphic(a, b) == False)
assert_(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = [7, 2, 3]
b = [6, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc, True, 5
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in xrange(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage(object):
def test_is_valid_linkage_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_linkage_various_size, nrow, ncol, valid
def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
# Tests is_valid_linkage(Z) with linkage matrics of various sizes
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self):
# Tests is_valid_linkage(Z) with integer type.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_linkage(Z) == False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
# Tests is_valid_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(object):
def test_is_valid_im_int_type(self):
# Tests is_valid_im(R) with integer type.
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_im(R) == False)
assert_raises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_im_various_size, nrow, ncol, valid
def check_is_valid_im_various_size(self, nrow, ncol, valid):
# Tests is_valid_im(R) with linkage matrics of various sizes
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(TestCase):
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
self.assertRaises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
self.assertTrue(num_obs_linkage(Z) == 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
self.assertTrue(num_obs_linkage(Z) == 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
self.assertTrue(num_obs_linkage(Z) == i)
class TestLeavesList(object):
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
yield self.check_leaves_list_Q, method
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond(TestCase):
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
self.assertRaises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in xrange(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
self.assertTrue(correspond(Z, y))
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
self.assertTrue(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
self.assertTrue(correspond(Z, y2) == False)
self.assertTrue(correspond(Z2, y) == False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
list(zip(list(range(2, 7)), list(range(16, 21))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
self.assertTrue(correspond(Z, y2) == False)
self.assertTrue(correspond(Z2, y) == False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
self.assertTrue(num_obs_linkage(Z) == n)
class TestIsMonotonic(TestCase):
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
self.assertRaises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2,2] = 0.0
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
self.assertTrue(is_monotonic(Z) == True)
class TestMaxDists(object):
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxdists_Q_linkage, method
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts(object):
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxinconsts_Q_linkage, method
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat(object):
def test_maxRstat_invalid_index(self):
for i in [3.3, -1, 4]:
yield self.check_maxRstat_invalid_index, i
def check_maxRstat_invalid_index(self, i):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
def test_maxRstat_empty_linkage(self):
for i in range(4):
yield self.check_maxRstat_empty_linkage, i
def check_maxRstat_empty_linkage(self, i):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_difrow_linkage(self):
for i in range(4):
yield self.check_maxRstat_difrow_linkage, i
def check_maxRstat_difrow_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_one_cluster_linkage(self):
for i in range(4):
yield self.check_maxRstat_one_cluster_linkage, i
def check_maxRstat_one_cluster_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxRstat_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
yield self.check_maxRstat_Q_linkage, method, i
def check_maxRstat_Q_linkage(self, method, i):
# Tests maxRstat(Z, R, i) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram(object):
def test_dendrogram_single_linkage_tdist(self):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
def test_valid_orientation(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
@dec.skipif(not have_matplotlib)
def test_dendrogram_plot(self):
for orientation in ['top', 'bottom', 'left', 'right']:
yield self.check_dendrogram_plot, orientation
def check_dendrogram_plot(self, orientation):
# Tests dendrogram plotting.
Z = linkage(hierarchy_test_data.ytdist, 'single')
expected = {'color_list': ['g', 'b', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4]}
fig = plt.figure()
ax = fig.add_subplot(111)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
plt.close()
assert_equal(R1, expected)
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
assert_equal(R2, expected)
@dec.skipif(not have_matplotlib)
def test_dendrogram_truncate_mode(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, 2, 'lastp', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['b'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9]})
R = dendrogram(Z, 2, 'mtica', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7]})
def calculate_maximum_distances(Z):
# Used for testing correctness of maxdists.
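    # B[i] holds the maximum merge distance within the subtree rooted at the
    # cluster formed in row i (id n + i), computed bottom-up as the max of the
    # children's maxima and the row's own merge distance Z[i, 2].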
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
def calculate_maximum_inconsistencies(Z, R, k=3):
# Used for testing correctness of maxinconsts.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = R[i, k]
B[i] = q.max()
return B
def test_euclidean_linkage_value_error():
for method in scipy.cluster.hierarchy._cpy_euclid_methods:
assert_raises(ValueError,
linkage, [[1, 1], [1, 1]], method=method, metric='cityblock')
def test_2x2_linkage():
Z1 = linkage([1], method='single', metric='euclidean')
Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
assert_allclose(Z1, Z2)
if __name__ == "__main__":
run_module_suite()
| mit |
hugobowne/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
ephillipe/nltk-trainer | nltk_trainer/classification/args.py | 6 | 8308 | from nltk.classify import DecisionTreeClassifier, MaxentClassifier, NaiveBayesClassifier, megam
from nltk_trainer import basestring
from nltk_trainer.classification.multi import AvgProbClassifier
classifier_choices = ['NaiveBayes', 'DecisionTree', 'Maxent'] + MaxentClassifier.ALGORITHMS
dense_classifiers = set(['ExtraTreesClassifier', 'GradientBoostingClassifier',
'RandomForestClassifier', 'GaussianNB', 'DecisionTreeClassifier'])
verbose_classifiers = set(['RandomForestClassifier', 'SVC'])
try:
import svmlight # do this first since svm module makes ugly errors
from nltk.classify.svm import SvmClassifier
classifier_choices.append('Svm')
except:
pass
try:
from nltk.classify import scikitlearn
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn import ensemble, feature_selection, linear_model, naive_bayes, neighbors, svm, tree
classifiers = [
ensemble.ExtraTreesClassifier,
ensemble.GradientBoostingClassifier,
ensemble.RandomForestClassifier,
linear_model.LogisticRegression,
#linear_model.SGDClassifier, # NOTE: this seems terrible, but could just be the options
naive_bayes.BernoulliNB,
naive_bayes.GaussianNB,
naive_bayes.MultinomialNB,
neighbors.KNeighborsClassifier, # TODO: options for nearest neighbors
svm.LinearSVC,
svm.NuSVC,
svm.SVC,
tree.DecisionTreeClassifier,
]
sklearn_classifiers = {}
for classifier in classifiers:
sklearn_classifiers[classifier.__name__] = classifier
classifier_choices.extend(sorted(['sklearn.%s' % c.__name__ for c in classifiers]))
except ImportError as exc:
sklearn_classifiers = {}
def add_maxent_args(parser):
maxent_group = parser.add_argument_group('Maxent Classifier',
'These options only apply when a Maxent classifier is chosen.')
maxent_group.add_argument('--max_iter', default=10, type=int,
help='maximum number of training iterations, defaults to %(default)d')
maxent_group.add_argument('--min_ll', default=0, type=float,
help='stop classification when average log-likelihood is less than this, default is %(default)d')
maxent_group.add_argument('--min_lldelta', default=0.1, type=float,
help='''stop classification when the change in average log-likelihood is less than this.
default is %(default)f''')
def add_decision_tree_args(parser):
decisiontree_group = parser.add_argument_group('Decision Tree Classifier',
'These options only apply when the DecisionTree classifier is chosen')
decisiontree_group.add_argument('--entropy_cutoff', default=0.05, type=float,
help='default is 0.05')
decisiontree_group.add_argument('--depth_cutoff', default=100, type=int,
help='default is 100')
decisiontree_group.add_argument('--support_cutoff', default=10, type=int,
help='default is 10')
sklearn_kwargs = {
# ensemble
'ExtraTreesClassifier': ['criterion', 'max_feats', 'depth_cutoff', 'n_estimators'],
'GradientBoostingClassifier': ['learning_rate', 'max_feats', 'depth_cutoff', 'n_estimators'],
'RandomForestClassifier': ['criterion', 'max_feats', 'depth_cutoff', 'n_estimators'],
# linear_model
'LogisticRegression': ['C','penalty'],
# naive_bayes
'BernoulliNB': ['alpha'],
'MultinomialNB': ['alpha'],
# svm
'LinearSVC': ['C', 'loss', 'penalty'],
'NuSVC': ['nu', 'kernel'],
'SVC': ['C', 'kernel'],
# tree
'DecisionTreeClassifier': ['criterion', 'max_feats', 'depth_cutoff'],
}
def add_sklearn_args(parser):
if not sklearn_classifiers: return
sklearn_group = parser.add_argument_group('sklearn Classifiers',
'These options are used by one or more sklearn classification algorithms.')
sklearn_group.add_argument('--alpha', type=float, default=1.0,
help='smoothing parameter for naive bayes classifiers, default is %(default)s')
sklearn_group.add_argument('--C', type=float, default=1.0,
help='penalty parameter, default is %(default)s')
sklearn_group.add_argument('--criterion', choices=['gini', 'entropy'],
default='gini', help='Split quality function, default is %(default)s')
sklearn_group.add_argument('--kernel', default='rbf',
choices=['linear', 'poly', 'rbf', 'sigmoid', 'precomputed'],
help='kernel type for support vector machine classifiers, default is %(default)s')
sklearn_group.add_argument('--learning_rate', type=float, default=0.1,
help='learning rate, default is %(default)s')
sklearn_group.add_argument('--loss', choices=['l1', 'l2'],
default='l2', help='loss function, default is %(default)s')
sklearn_group.add_argument('--n_estimators', type=int, default=10,
help='Number of trees for Decision Tree ensembles, default is %(default)s')
sklearn_group.add_argument('--nu', type=float, default=0.5,
help='upper bound on fraction of training errors & lower bound on fraction of support vectors, default is %(default)s')
sklearn_group.add_argument('--penalty', choices=['l1', 'l2'],
default='l2', help='norm for penalization, default is %(default)s')
sklearn_group.add_argument('--tfidf', default=False, action='store_true',
help='Use TfidfTransformer')
# for mapping existing args to sklearn args
sklearn_keys = {
'max_feats': 'max_features',
'depth_cutoff': 'max_depth'
}
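# Build the sklearn estimator named by `algo` ('sklearn.<ClassName>'), forwarding
# any matching command-line options (argument names translated via sklearn_keys).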
def make_sklearn_classifier(algo, args):
name = algo.split('.', 1)[1]
kwargs = {}
for key in sklearn_kwargs.get(name, []):
val = getattr(args, key, None)
if val: kwargs[sklearn_keys.get(key, key)] = val
if args.trace and kwargs:
print('training %s with %s' % (algo, kwargs))
if args.trace and name in verbose_classifiers:
kwargs['verbose'] = True
return sklearn_classifiers[name](**kwargs)
def make_classifier_builder(args):
if isinstance(args.classifier, basestring):
algos = [args.classifier]
else:
algos = args.classifier
for algo in algos:
if algo not in classifier_choices:
raise ValueError('classifier %s is not supported' % algo)
classifier_train_args = []
for algo in algos:
classifier_train_kwargs = {}
if algo == 'DecisionTree':
classifier_train = DecisionTreeClassifier.train
classifier_train_kwargs['binary'] = False
classifier_train_kwargs['entropy_cutoff'] = args.entropy_cutoff
classifier_train_kwargs['depth_cutoff'] = args.depth_cutoff
classifier_train_kwargs['support_cutoff'] = args.support_cutoff
classifier_train_kwargs['verbose'] = args.trace
elif algo == 'NaiveBayes':
classifier_train = NaiveBayesClassifier.train
elif algo == 'Svm':
classifier_train = SvmClassifier.train
elif algo.startswith('sklearn.'):
# TODO: support many options for building an estimator pipeline
pipe = [('classifier', make_sklearn_classifier(algo, args))]
tfidf = getattr(args, 'tfidf', None)
penalty = getattr(args, 'penalty', None)
if tfidf and penalty:
if args.trace:
print('using tfidf transformer with norm %s' % penalty)
pipe.insert(0, ('tfidf', TfidfTransformer(norm=penalty)))
sparse = pipe[-1][1].__class__.__name__ not in dense_classifiers
if not sparse and args.trace:
print('using dense matrix')
value_type = getattr(args, 'value_type', 'bool')
if value_type == 'bool' and not tfidf:
dtype = bool
elif value_type == 'int' and not tfidf:
dtype = int
else:
dtype = float
if args.trace:
print('using dtype %s' % dtype.__name__)
classifier_train = scikitlearn.SklearnClassifier(Pipeline(pipe), dtype=dtype, sparse=sparse).train
else:
if algo != 'Maxent':
classifier_train_kwargs['algorithm'] = algo
if algo == 'MEGAM':
megam.config_megam()
classifier_train = MaxentClassifier.train
classifier_train_kwargs['max_iter'] = args.max_iter
classifier_train_kwargs['min_ll'] = args.min_ll
classifier_train_kwargs['min_lldelta'] = args.min_lldelta
classifier_train_kwargs['trace'] = args.trace
classifier_train_args.append((algo, classifier_train, classifier_train_kwargs))
def trainf(train_feats):
classifiers = []
for algo, classifier_train, train_kwargs in classifier_train_args:
if args.trace:
print('training %s classifier' % algo)
classifiers.append(classifier_train(train_feats, **train_kwargs))
if len(classifiers) == 1:
return classifiers[0]
else:
return AvgProbClassifier(classifiers)
return trainf
#return lambda(train_feats): classifier_train(train_feats, **classifier_train_kwargs)
| apache-2.0 |
tboch/mocpy | mocpy/moc/plot/wcs.py | 1 | 3529 | import numpy as np
from astropy import coordinates
from astropy import wcs
import astropy.units as u
from matplotlib.pyplot import figure
class World2ScreenMPL:
"""
    Create a World2ScreenMPL for visualizing a MOC in a matplotlib axis.
Parameters
----------
fig : `~matplotlib.pyplot.figure`
The matplotlib figure used for plotting the MOC.
fov : `~astropy.units.Quantity`
Size of the field of view.
center : `~astropy.coordinates.SkyCoord`, optional
World coordinates matching with the center of the plot. Default to (0 deg, 0 deg) (in ICRS frame).
coordsys : str, optional
Coordinate system. Default to "icrs". Must be in ["icrs", "galactic"].
projection : str, optional
World base -> Image base projection type. See http://docs.astropy.org/en/stable/wcs/#supported-projections for
the projections currently supported in astropy. Default to Aitoff.
rotation : `~astropy.coordinates.Angle`, optional
The angle of rotation. Default to no rotation.
Returns
-------
wcs : `~astropy.wcs.WCS`
The WCS that can be passed to mocpy.MOC.fill/border.
Examples
--------
>>> from mocpy import MOC, World2ScreenMPL
>>> from astropy.coordinates import Angle, SkyCoord
>>> import astropy.units as u
>>> # Load a MOC
>>> filename = './../resources/P-GALEXGR6-AIS-FUV.fits'
>>> moc = MOC.from_fits(filename)
>>> # Plot the MOC using matplotlib
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(111, figsize=(15, 15))
>>> # Define a World2ScreenMPL as a context
>>> with World2ScreenMPL(fig,
... fov=200 * u.deg,
... center=SkyCoord(0, 20, unit='deg', frame='icrs'),
... coordsys="icrs",
... rotation=Angle(0, u.degree),
... projection="AIT") as wcs:
... ax = fig.add_subplot(1, 1, 1, projection=wcs)
... # Call fill with a matplotlib axe and the `~astropy.wcs.WCS` wcs object.
... moc.fill(ax=ax, wcs=wcs, alpha=0.5, fill=True, color="green")
... moc.border(ax=ax, wcs=wcs, alpha=0.5, color="black")
>>> plt.xlabel('ra')
>>> plt.ylabel('dec')
>>> plt.grid(color="black", linestyle="dotted")
"""
def __init__(self,
fig,
fov,
center=coordinates.SkyCoord(0, 0, unit="deg", frame="icrs"),
coordsys="icrs",
projection="AIT",
rotation=coordinates.Angle(0, u.radian)):
self.w = wcs.WCS(naxis=2)
width_px, height_px = fig.get_size_inches() * float(fig.dpi)
cdelt_x = fov.to_value("deg")/float(width_px)
cdelt_y = fov.to_value("deg")/float(height_px)
self.w.wcs.crpix = [width_px/2.0, height_px/2.0]
self.w.wcs.cdelt = [-cdelt_x, cdelt_x]
if coordsys == 'icrs':
self.w.wcs.crval = [center.icrs.ra.deg, center.icrs.dec.deg]
self.w.wcs.ctype = ['RA---' + projection, 'DEC--' + projection]
elif coordsys == 'galactic':
self.w.wcs.crval = [center.galactic.l.deg, center.galactic.b.deg]
self.w.wcs.ctype = ['GLON-' + projection, 'GLAT-' + projection]
theta = rotation.radian
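        # Encode the requested field rotation as a standard 2-D rotation (PC) matrix.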
self.w.wcs.pc = [
[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)],
]
def __enter__(self):
return self.w
def __exit__(self, exception_type, exception_value, traceback):
pass
| gpl-3.0 |
pythonvietnam/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination with
# the cross_validation module (n_jobs=1 here; increase it to use more CPUs)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
nmayorov/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
aselle/tensorflow | tensorflow/python/client/notebook.py | 61 | 4779 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = (
[sys.argv[0]] + [x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/event_handling/pipong.py | 3 | 8839 | #!/usr/bin/env python
# A matplotlib-based game of Pong illustrating one way to write interactive
# animations which are easily ported to multiple backends
# pipong.py was written by Paul Ivanov <http://pirsquared.org>
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import randn, randint
instructions = """
Player A: Player B:
'e' up 'i'
'd' down 'k'
press 't' -- close these instructions
(animation will be much faster)
press 'a' -- add a puck
press 'A' -- remove a puck
press '1' -- slow down all pucks
press '2' -- speed up all pucks
press '3' -- slow down distractors
press '4' -- speed up distractors
press ' ' -- reset the first puck
press 'n' -- toggle distractors on/off
press 'g' -- toggle the game on/off
"""
class Pad(object):
def __init__(self, disp,x,y,type='l'):
self.disp = disp
self.x = x
self.y = y
self.w = .3
self.score = 0
self.xoffset = 0.3
self.yoffset = 0.1
if type=='r':
self.xoffset *= -1.0
if type=='l' or type=='r':
self.signx = -1.0
self.signy = 1.0
else:
self.signx = 1.0
self.signy = -1.0
def contains(self, loc):
return self.disp.get_bbox().contains(loc.x,loc.y)
class Puck(object):
def __init__(self, disp, pad, field):
self.vmax= .2
self.disp = disp
self.field = field
self._reset(pad)
def _reset(self,pad):
self.x = pad.x + pad.xoffset
if pad.y < 0:
self.y = pad.y + pad.yoffset
else:
self.y = pad.y - pad.yoffset
self.vx = pad.x - self.x
self.vy = pad.y + pad.w/2 - self.y
self._speedlimit()
self._slower()
self._slower()
def update(self,pads):
self.x += self.vx
self.y += self.vy
for pad in pads:
if pad.contains(self):
self.vx *= 1.2 *pad.signx
self.vy *= 1.2 *pad.signy
fudge = .001
#probably cleaner with something like...if not self.field.contains(self.x, self.y):
if self.x < 0+fudge:
#print "player A loses"
pads[1].score += 1;
self._reset(pads[0])
return True
if self.x > 7-fudge:
#print "player B loses"
pads[0].score += 1;
self._reset(pads[1])
return True
if self.y < -1+fudge or self.y > 1-fudge:
self.vy *= -1.0
# add some randomness, just to make it interesting
self.vy -= (randn()/300.0 + 1/300.0) * np.sign(self.vy)
self._speedlimit()
return False
def _slower(self):
self.vx /= 5.0
self.vy /= 5.0
def _faster(self):
self.vx *= 5.0
self.vy *= 5.0
def _speedlimit(self):
if self.vx > self.vmax:
self.vx = self.vmax
if self.vx < -self.vmax:
self.vx = -self.vmax
if self.vy > self.vmax:
self.vy = self.vmax
if self.vy < -self.vmax:
self.vy = -self.vmax
class Game(object):
def __init__(self, ax):
# create the initial line
self.ax = ax
padAx = padBx= .50
padAy = padBy= .30
padBx+=6.3
pA, = self.ax.barh(padAy,.2, height=.3,color='k', alpha=.5, edgecolor='b',lw=2,label="Player B", animated=True)
pB, = self.ax.barh(padBy,.2, height=.3, left=padBx, color='k',alpha=.5, edgecolor='r',lw=2,label="Player A",animated=True)
# distractors
self.x = np.arange(0,2.22*np.pi,0.01)
self.line, = self.ax.plot(self.x, np.sin(self.x),"r", animated=True, lw=4)
self.line2, = self.ax.plot(self.x, np.cos(self.x),"g", animated=True, lw=4)
self.line3, = self.ax.plot(self.x, np.cos(self.x),"g", animated=True, lw=4)
self.line4, = self.ax.plot(self.x, np.cos(self.x),"r", animated=True, lw=4)
self.centerline,= self.ax.plot([3.5,3.5], [1,-1],'k',alpha=.5, animated=True, lw=8)
self.puckdisp = self.ax.scatter([1],[1],label='_nolegend_', s=200,c='g',alpha=.9,animated=True)
self.canvas = self.ax.figure.canvas
self.background = None
self.cnt = 0
self.distract = True
self.res = 100.0
self.on = False
self.inst = True # show instructions from the beginning
self.background = None
self.pads = []
self.pads.append( Pad(pA,0,padAy))
self.pads.append( Pad(pB,padBx,padBy,'r'))
self.pucks =[]
self.i = self.ax.annotate(instructions,(.5,0.5),
name='monospace',
verticalalignment='center',
horizontalalignment='center',
multialignment='left',
textcoords='axes fraction',animated=True )
self.canvas.mpl_connect('key_press_event', self.key_press)
def draw(self, evt):
draw_artist = self.ax.draw_artist
if self.background is None:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
# restore the clean slate background
self.canvas.restore_region(self.background)
# show the distractors
if self.distract:
self.line.set_ydata(np.sin(self.x+self.cnt/self.res))
self.line2.set_ydata(np.cos(self.x-self.cnt/self.res))
self.line3.set_ydata(np.tan(self.x+self.cnt/self.res))
self.line4.set_ydata(np.tan(self.x-self.cnt/self.res))
draw_artist(self.line)
draw_artist(self.line2)
draw_artist(self.line3)
draw_artist(self.line4)
# show the instructions - this is very slow
if self.inst:
self.ax.draw_artist(self.i)
# pucks and pads
if self.on:
self.ax.draw_artist(self.centerline)
for pad in self.pads:
pad.disp.set_y(pad.y)
pad.disp.set_x(pad.x)
self.ax.draw_artist(pad.disp)
for puck in self.pucks:
if puck.update(self.pads):
# we only get here if someone scored
self.pads[0].disp.set_label(" "+ str(self.pads[0].score))
self.pads[1].disp.set_label(" "+ str(self.pads[1].score))
self.ax.legend(loc='center')
self.leg = self.ax.get_legend()
#self.leg.draw_frame(False) #don't draw the legend border
self.leg.get_frame().set_alpha(.2)
plt.setp(self.leg.get_texts(),fontweight='bold',fontsize='xx-large')
self.leg.get_frame().set_facecolor('0.2')
self.background = None
self.ax.figure.canvas.draw()
return True
puck.disp.set_offsets([puck.x,puck.y])
self.ax.draw_artist(puck.disp)
# just redraw the axes rectangle
self.canvas.blit(self.ax.bbox)
if self.cnt==50000:
# just so we don't get carried away
            print("...and you've been playing for too long!!!")
plt.close()
self.cnt += 1
return True
def key_press(self,event):
if event.key == '3':
self.res *= 5.0
if event.key == '4':
self.res /= 5.0
if event.key == 'e':
self.pads[0].y += .1
if self.pads[0].y > 1 - .3:
self.pads[0].y = 1-.3
if event.key == 'd':
self.pads[0].y -= .1
if self.pads[0].y < -1:
self.pads[0].y = -1
if event.key == 'i':
self.pads[1].y += .1
if self.pads[1].y > 1 - .3:
self.pads[1].y = 1-.3
if event.key == 'k':
self.pads[1].y -= .1
if self.pads[1].y < -1:
self.pads[1].y = -1
if event.key == 'a':
self.pucks.append(Puck(self.puckdisp,self.pads[randint(2)],self.ax.bbox))
if event.key == 'A' and len(self.pucks):
self.pucks.pop()
if event.key == ' ' and len(self.pucks):
self.pucks[0]._reset(self.pads[randint(2)])
if event.key == '1':
for p in self.pucks:
p._slower()
if event.key == '2':
for p in self.pucks:
p._faster()
if event.key == 'n':
self.distract = not self.distract
if event.key == 'g':
#self.ax.clear()
#self.ax.grid() # seems to be necessary for qt backend
self.on = not self.on
if event.key == 't':
self.inst = not self.inst
            self.i.set_visible(not self.i.get_visible())
if event.key == 'q':
plt.close()
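# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original example): a minimal driver that
# animates Game with a canvas timer. The original demos wire this class up from
# separate launcher scripts, so treat this as an untested illustration that
# assumes an interactive matplotlib backend is available.
if __name__ == '__main__':
    fig, ax = plt.subplots()
    ax.set_xlim(0, 7)
    ax.set_ylim(-1, 1)
    game = Game(ax)
    # Drive Game.draw() periodically; its event argument is unused there.
    timer = fig.canvas.new_timer(interval=10)
    timer.add_callback(game.draw, None)
    timer.start()
    plt.show()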
| gpl-2.0 |
librosa/librosa | librosa/core/audio.py | 2 | 49849 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Core IO, DSP and utility functions."""
import pathlib
import warnings
import soundfile as sf
import audioread
import numpy as np
import scipy.signal
import resampy
from numba import jit
from .fft import get_fftlib
from .convert import frames_to_samples, time_to_samples
from .._cache import cache
from .. import util
from ..util.exceptions import ParameterError
__all__ = [
"load",
"stream",
"to_mono",
"resample",
"get_duration",
"get_samplerate",
"autocorrelate",
"lpc",
"zero_crossings",
"clicks",
"tone",
"chirp",
"mu_compress",
"mu_expand",
]
# Resampling bandwidths as percentage of Nyquist
BW_BEST = resampy.filters.get_filter("kaiser_best")[2]
BW_FASTEST = resampy.filters.get_filter("kaiser_fast")[2]
# -- CORE ROUTINES --#
# Load should never be cached, since we cannot verify that the contents of
# 'path' are unchanged across calls.
def load(
path,
sr=22050,
mono=True,
offset=0.0,
duration=None,
dtype=np.float32,
res_type="kaiser_best",
):
"""Load an audio file as a floating point time series.
Audio will be automatically resampled to the given rate
(default ``sr=22050``).
To preserve the native sampling rate of the file, use ``sr=None``.
Parameters
----------
path : string, int, pathlib.Path or file-like object
path to the input file.
Any codec supported by `soundfile` or `audioread` will work.
Any string file paths, or any object implementing Python's
file interface (e.g. `pathlib.Path`) are supported as `path`.
If the codec is supported by `soundfile`, then `path` can also be
an open file descriptor (int).
On the contrary, if the codec is not supported by `soundfile`
(for example, MP3), then `path` must be a file path (string or `pathlib.Path`).
sr : number > 0 [scalar]
target sampling rate
'None' uses the native sampling rate
mono : bool
convert signal to mono
offset : float
start reading after this time (in seconds)
duration : float
only load up to this much audio (in seconds)
dtype : numeric type
data type of ``y``
res_type : str
resample type (see note)
.. note::
By default, this uses `resampy`'s high-quality mode ('kaiser_best').
For alternative resampling modes, see `resample`
.. note::
`audioread` may truncate the precision of the audio data to 16 bits.
See :ref:`ioformats` for alternate loading methods.
Returns
-------
y : np.ndarray [shape=(n,) or (2, n)]
audio time series
sr : number > 0 [scalar]
sampling rate of ``y``
Examples
--------
>>> # Load an ogg vorbis file
>>> filename = librosa.ex('trumpet')
>>> y, sr = librosa.load(filename)
>>> y
array([-1.407e-03, -4.461e-04, ..., -3.042e-05, 1.277e-05],
dtype=float32)
>>> sr
22050
>>> # Load a file and resample to 11 KHz
>>> filename = librosa.ex('trumpet')
>>> y, sr = librosa.load(filename, sr=11025)
>>> y
array([-8.746e-04, -3.363e-04, ..., -1.301e-05, 0.000e+00],
dtype=float32)
>>> sr
11025
>>> # Load 5 seconds of a file, starting 15 seconds in
>>> filename = librosa.ex('brahms')
>>> y, sr = librosa.load(filename, offset=15.0, duration=5.0)
>>> y
array([0.146, 0.144, ..., 0.128, 0.015], dtype=float32)
>>> sr
22050
"""
try:
with sf.SoundFile(path) as sf_desc:
sr_native = sf_desc.samplerate
if offset:
# Seek to the start of the target read
sf_desc.seek(int(offset * sr_native))
if duration is not None:
frame_duration = int(duration * sr_native)
else:
frame_duration = -1
# Load the target number of frames, and transpose to match librosa form
y = sf_desc.read(frames=frame_duration, dtype=dtype, always_2d=False).T
except RuntimeError as exc:
# If soundfile failed, try audioread instead
if isinstance(path, (str, pathlib.PurePath)):
warnings.warn("PySoundFile failed. Trying audioread instead.")
y, sr_native = __audioread_load(path, offset, duration, dtype)
else:
raise (exc)
# Final cleanup for dtype and contiguity
if mono:
y = to_mono(y)
if sr is not None:
y = resample(y, sr_native, sr, res_type=res_type)
else:
sr = sr_native
return y, sr
def __audioread_load(path, offset, duration, dtype):
"""Load an audio buffer using audioread.
This loads one block at a time, and then concatenates the results.
"""
y = []
with audioread.audio_open(path) as input_file:
sr_native = input_file.samplerate
n_channels = input_file.channels
s_start = int(np.round(sr_native * offset)) * n_channels
if duration is None:
s_end = np.inf
else:
s_end = s_start + (int(np.round(sr_native * duration)) * n_channels)
n = 0
for frame in input_file:
frame = util.buf_to_float(frame, dtype=dtype)
n_prev = n
n = n + len(frame)
if n < s_start:
# offset is after the current frame
# keep reading
continue
if s_end < n_prev:
# we're off the end. stop reading
break
if s_end < n:
# the end is in this frame. crop.
frame = frame[: s_end - n_prev]
if n_prev <= s_start <= n:
# beginning is in this frame
frame = frame[(s_start - n_prev) :]
# tack on the current frame
y.append(frame)
if y:
y = np.concatenate(y)
if n_channels > 1:
y = y.reshape((-1, n_channels)).T
else:
y = np.empty(0, dtype=dtype)
return y, sr_native
def stream(
path,
block_length,
frame_length,
hop_length,
mono=True,
offset=0.0,
duration=None,
fill_value=None,
dtype=np.float32,
):
"""Stream audio in fixed-length buffers.
This is primarily useful for processing large files that won't
fit entirely in memory at once.
Instead of loading the entire audio signal into memory (as
in `load`, this function produces *blocks* of audio spanning
a fixed number of frames at a specified frame length and hop
length.
While this function strives for similar behavior to `load`,
there are a few caveats that users should be aware of:
1. This function does not return audio buffers directly.
It returns a generator, which you can iterate over
to produce blocks of audio. A *block*, in this context,
refers to a buffer of audio which spans a given number of
(potentially overlapping) frames.
2. Automatic sample-rate conversion is not supported.
Audio will be streamed in its native sample rate,
so no default values are provided for ``frame_length``
and ``hop_length``. It is recommended that you first
get the sampling rate for the file in question, using
`get_samplerate`, and set these parameters accordingly.
3. Many analyses require access to the entire signal
to behave correctly, such as `resample`, `cqt`, or
`beat_track`, so these methods will not be appropriate
for streamed data.
4. The ``block_length`` parameter specifies how many frames
of audio will be produced per block. Larger values will
consume more memory, but will be more efficient to process
down-stream. The best value will ultimately depend on your
application and other system constraints.
5. By default, most librosa analyses (e.g., short-time Fourier
transform) assume centered frames, which requires padding the
signal at the beginning and end. This will not work correctly
when the signal is carved into blocks, because it would introduce
padding in the middle of the signal. To disable this feature,
use ``center=False`` in all frame-based analyses.
See the examples below for proper usage of this function.
Parameters
----------
path : string, int, or file-like object
path to the input file to stream.
Any codec supported by `soundfile` is permitted here.
block_length : int > 0
The number of frames to include in each block.
Note that at the end of the file, there may not be enough
data to fill an entire block, resulting in a shorter block
by default. To pad the signal out so that blocks are always
full length, set ``fill_value`` (see below).
frame_length : int > 0
The number of samples per frame.
hop_length : int > 0
The number of samples to advance between frames.
        Note that when ``hop_length < frame_length``, neighboring frames
will overlap. Similarly, the last frame of one *block* will overlap
with the first frame of the next *block*.
mono : bool
Convert the signal to mono during streaming
offset : float
Start reading after this time (in seconds)
duration : float
Only load up to this much audio (in seconds)
fill_value : float [optional]
If padding the signal to produce constant-length blocks,
this value will be used at the end of the signal.
In most cases, ``fill_value=0`` (silence) is expected, but
you may specify any value here.
dtype : numeric type
data type of audio buffers to be produced
Yields
------
y : np.ndarray
An audio buffer of (at most)
``(block_length-1) * hop_length + frame_length`` samples.
See Also
--------
load
get_samplerate
soundfile.blocks
Examples
--------
Apply a short-term Fourier transform to blocks of 256 frames
at a time. Note that streaming operation requires left-aligned
frames, so we must set ``center=False`` to avoid padding artifacts.
>>> filename = librosa.ex('brahms')
>>> sr = librosa.get_samplerate(filename)
>>> stream = librosa.stream(filename,
... block_length=256,
... frame_length=4096,
... hop_length=1024)
>>> for y_block in stream:
... D_block = librosa.stft(y_block, center=False)
Or compute a mel spectrogram over a stream, using a shorter frame
and non-overlapping windows
>>> filename = librosa.ex('brahms')
>>> sr = librosa.get_samplerate(filename)
>>> stream = librosa.stream(filename,
... block_length=256,
... frame_length=2048,
... hop_length=2048)
>>> for y_block in stream:
... m_block = librosa.feature.melspectrogram(y_block, sr=sr,
... n_fft=2048,
... hop_length=2048,
... center=False)
"""
if not (np.issubdtype(type(block_length), np.integer) and block_length > 0):
raise ParameterError("block_length={} must be a positive integer")
if not (np.issubdtype(type(frame_length), np.integer) and frame_length > 0):
raise ParameterError("frame_length={} must be a positive integer")
if not (np.issubdtype(type(hop_length), np.integer) and hop_length > 0):
raise ParameterError("hop_length={} must be a positive integer")
# Get the sample rate from the file info
sr = sf.info(path).samplerate
# If the input is a file handle, rewind its read position after `sf.info`
if hasattr(path, "seek"):
path.seek(0)
# Construct the stream
if offset:
start = int(offset * sr)
else:
start = 0
if duration:
frames = int(duration * sr)
else:
frames = -1
blocks = sf.blocks(
path,
blocksize=frame_length + (block_length - 1) * hop_length,
overlap=frame_length - hop_length,
fill_value=fill_value,
start=start,
frames=frames,
dtype=dtype,
always_2d=False,
)
for block in blocks:
if mono:
yield to_mono(block.T)
else:
yield block.T
@cache(level=20)
def to_mono(y):
"""Convert an audio signal to mono by averaging samples across channels.
Parameters
----------
y : np.ndarray [shape=(2,n) or shape=(n,)]
audio time series, either stereo or mono
Returns
-------
y_mono : np.ndarray [shape=(n,)]
``y`` as a monophonic time-series
Notes
-----
This function caches at level 20.
Examples
--------
>>> y, sr = librosa.load(librosa.ex('trumpet', hq=True), mono=False)
>>> y.shape
(2, 117601)
>>> y_mono = librosa.to_mono(y)
>>> y_mono.shape
(117601,)
"""
# Ensure Fortran contiguity.
y = np.asfortranarray(y)
# Validate the buffer. Stereo is ok here.
util.valid_audio(y, mono=False)
if y.ndim > 1:
y = np.mean(y, axis=0)
return y
@cache(level=20)
def resample(
y, orig_sr, target_sr, res_type="kaiser_best", fix=True, scale=False, **kwargs
):
"""Resample a time series from orig_sr to target_sr
By default, this uses a high-quality (but relatively slow) method ('kaiser_best')
for band-limited sinc interpolation. The alternate ``res_type`` values listed below
offer different trade-offs of speed and quality.
Parameters
----------
y : np.ndarray [shape=(n,) or shape=(2, n)]
audio time series. Can be mono or stereo.
orig_sr : number > 0 [scalar]
original sampling rate of ``y``
target_sr : number > 0 [scalar]
target sampling rate
res_type : str
resample type
'kaiser_best' (default)
`resampy` high-quality mode
'kaiser_fast'
`resampy` faster method
'fft' or 'scipy'
`scipy.signal.resample` Fourier method.
'polyphase'
`scipy.signal.resample_poly` polyphase filtering. (fast)
'linear'
`samplerate` linear interpolation. (very fast)
'zero_order_hold'
`samplerate` repeat the last value between samples. (very fast)
'sinc_best', 'sinc_medium' or 'sinc_fastest'
`samplerate` high-, medium-, and low-quality sinc interpolation.
'soxr_vhq', 'soxr_hq', 'soxr_mq' or 'soxr_lq'
`soxr` Very high-, High-, Medium-, Low-quality FFT-based bandlimited interpolation.
``'soxr_hq'`` is the default setting of `soxr` (fast)
'soxr_qq'
`soxr` Quick cubic interpolation (very fast)
.. note::
`samplerate` and `soxr` are not installed with `librosa`.
To use `samplerate` or `soxr`, they should be installed manually::
$ pip install samplerate
$ pip install soxr
.. note::
When using ``res_type='polyphase'``, only integer sampling rates are
supported.
fix : bool
adjust the length of the resampled signal to be of size exactly
``ceil(target_sr * len(y) / orig_sr)``
scale : bool
Scale the resampled signal so that ``y`` and ``y_hat`` have approximately
equal total energy.
kwargs : additional keyword arguments
If ``fix==True``, additional keyword arguments to pass to
`librosa.util.fix_length`.
Returns
-------
y_hat : np.ndarray [shape=(n * target_sr / orig_sr,)]
``y`` resampled from ``orig_sr`` to ``target_sr``
Raises
------
ParameterError
If ``res_type='polyphase'`` and ``orig_sr`` or ``target_sr`` are not both
integer-valued.
See Also
--------
librosa.util.fix_length
scipy.signal.resample
resampy
samplerate.converters.resample
soxr.resample
Notes
-----
This function caches at level 20.
Examples
--------
Downsample from 22 KHz to 8 KHz
>>> y, sr = librosa.load(librosa.ex('trumpet'), sr=22050)
>>> y_8k = librosa.resample(y, sr, 8000)
>>> y.shape, y_8k.shape
((117601,), (42668,))
"""
# First, validate the audio buffer
util.valid_audio(y, mono=False)
if orig_sr == target_sr:
return y
ratio = float(target_sr) / orig_sr
n_samples = int(np.ceil(y.shape[-1] * ratio))
if res_type in ("scipy", "fft"):
y_hat = scipy.signal.resample(y, n_samples, axis=-1)
elif res_type == "polyphase":
if int(orig_sr) != orig_sr or int(target_sr) != target_sr:
raise ParameterError(
"polyphase resampling is only supported for integer-valued sampling rates."
)
# For polyphase resampling, we need up- and down-sampling ratios
# We can get those from the greatest common divisor of the rates
        # as long as the rates are integer-valued
orig_sr = int(orig_sr)
target_sr = int(target_sr)
gcd = np.gcd(orig_sr, target_sr)
y_hat = scipy.signal.resample_poly(y, target_sr // gcd, orig_sr // gcd, axis=-1)
elif res_type in (
"linear",
"zero_order_hold",
"sinc_best",
"sinc_fastest",
"sinc_medium",
):
import samplerate
# We have to transpose here to match libsamplerate
y_hat = samplerate.resample(y.T, ratio, converter_type=res_type).T
elif res_type.startswith('soxr'):
import soxr
# We have to transpose here to match soxr
y_hat = soxr.resample(y.T, orig_sr, target_sr, quality=res_type).T
else:
y_hat = resampy.resample(y, orig_sr, target_sr, filter=res_type, axis=-1)
if fix:
y_hat = util.fix_length(y_hat, n_samples, **kwargs)
if scale:
y_hat /= np.sqrt(ratio)
return np.asfortranarray(y_hat, dtype=y.dtype)
def get_duration(
y=None, sr=22050, S=None, n_fft=2048, hop_length=512, center=True, filename=None
):
"""Compute the duration (in seconds) of an audio time series,
feature matrix, or filename.
Examples
--------
>>> # Load an example audio file
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> librosa.get_duration(y=y, sr=sr)
5.333378684807256
>>> # Or directly from an audio file
>>> librosa.get_duration(filename=librosa.ex('trumpet'))
5.333378684807256
>>> # Or compute duration from an STFT matrix
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> S = librosa.stft(y)
>>> librosa.get_duration(S=S, sr=sr)
5.317369614512471
>>> # Or a non-centered STFT matrix
>>> S_left = librosa.stft(y, center=False)
>>> librosa.get_duration(S=S_left, sr=sr)
5.224489795918367
Parameters
----------
y : np.ndarray [shape=(n,), (2, n)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of ``y``
S : np.ndarray [shape=(d, t)] or None
STFT matrix, or any STFT-derived matrix (e.g., chromagram
or mel spectrogram).
Durations calculated from spectrogram inputs are only accurate
up to the frame resolution. If high precision is required,
it is better to use the audio time series directly.
n_fft : int > 0 [scalar]
FFT window size for ``S``
    hop_length : int > 0 [scalar]
number of audio samples between columns of ``S``
center : boolean
- If ``True``, ``S[:, t]`` is centered at ``y[t * hop_length]``
- If ``False``, then ``S[:, t]`` begins at ``y[t * hop_length]``
filename : str
If provided, all other parameters are ignored, and the
duration is calculated directly from the audio file.
Note that this avoids loading the contents into memory,
and is therefore useful for querying the duration of
long files.
As in ``load``, this can also be an integer or open file-handle
that can be processed by ``soundfile``.
Returns
-------
d : float >= 0
Duration (in seconds) of the input time series or spectrogram.
Raises
------
ParameterError
if none of ``y``, ``S``, or ``filename`` are provided.
Notes
-----
`get_duration` can be applied to a file (``filename``), a spectrogram (``S``),
or audio buffer (``y, sr``). Only one of these three options should be
provided. If you do provide multiple options (e.g., ``filename`` and ``S``),
then ``filename`` takes precedence over ``S``, and ``S`` takes precedence over
``(y, sr)``.
"""
if filename is not None:
try:
return sf.info(filename).duration
except RuntimeError:
with audioread.audio_open(filename) as fdesc:
return fdesc.duration
if y is None:
if S is None:
raise ParameterError(
"At least one of (y, sr), S, or filename must be provided"
)
n_frames = S.shape[1]
n_samples = n_fft + hop_length * (n_frames - 1)
# If centered, we lose half a window from each end of S
if center:
n_samples = n_samples - 2 * int(n_fft / 2)
else:
# Ensure Fortran contiguity.
y = np.asfortranarray(y)
# Validate the audio buffer. Stereo is okay here.
util.valid_audio(y, mono=False)
if y.ndim == 1:
n_samples = len(y)
else:
n_samples = y.shape[-1]
return float(n_samples) / sr
def get_samplerate(path):
"""Get the sampling rate for a given file.
Parameters
----------
path : string, int, or file-like
The path to the file to be loaded
As in ``load``, this can also be an integer or open file-handle
that can be processed by `soundfile`.
Returns
-------
sr : number > 0
The sampling rate of the given audio file
Examples
--------
Get the sampling rate for the included audio file
>>> path = librosa.ex('trumpet')
>>> librosa.get_samplerate(path)
22050
"""
try:
return sf.info(path).samplerate
except RuntimeError:
with audioread.audio_open(path) as fdesc:
return fdesc.samplerate
@cache(level=20)
def autocorrelate(y, max_size=None, axis=-1):
"""Bounded-lag auto-correlation
Parameters
----------
y : np.ndarray
array to autocorrelate
max_size : int > 0 or None
maximum correlation lag.
If unspecified, defaults to ``y.shape[axis]`` (unbounded)
axis : int
The axis along which to autocorrelate.
By default, the last axis (-1) is taken.
Returns
-------
z : np.ndarray
truncated autocorrelation ``y*y`` along the specified axis.
If ``max_size`` is specified, then ``z.shape[axis]`` is bounded
to ``max_size``.
Notes
-----
This function caches at level 20.
Examples
--------
Compute full autocorrelation of ``y``
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> librosa.autocorrelate(y)
array([ 6.899e+02, 6.236e+02, ..., 3.710e-08, -1.796e-08])
Compute onset strength auto-correlation up to 4 seconds
>>> import matplotlib.pyplot as plt
>>> odf = librosa.onset.onset_strength(y=y, sr=sr, hop_length=512)
>>> ac = librosa.autocorrelate(odf, max_size=4* sr / 512)
>>> fig, ax = plt.subplots()
>>> ax.plot(ac)
>>> ax.set(title='Auto-correlation', xlabel='Lag (frames)')
"""
if max_size is None:
max_size = y.shape[axis]
max_size = int(min(max_size, y.shape[axis]))
# Compute the power spectrum along the chosen axis
# Pad out the signal to support full-length auto-correlation.
fft = get_fftlib()
powspec = np.abs(fft.fft(y, n=2 * y.shape[axis] + 1, axis=axis)) ** 2
# Convert back to time domain
autocorr = fft.ifft(powspec, axis=axis)
# Slice down to max_size
subslice = [slice(None)] * autocorr.ndim
subslice[axis] = slice(max_size)
autocorr = autocorr[tuple(subslice)]
if not np.iscomplexobj(y):
autocorr = autocorr.real
return autocorr
def lpc(y, order):
"""Linear Prediction Coefficients via Burg's method
This function applies Burg's method to estimate coefficients of a linear
    filter on ``y`` of order ``order``. Burg's method is an extension of the
    Yule-Walker approach; both are sometimes referred to as LPC parameter
    estimation by autocorrelation.
It follows the description and implementation approach described in the
    introduction by Marple. [#]_ N.B. Marple's paper describes a different method,
    which is not implemented here; the paper was chosen for the clear explanation
    of Burg's technique in its introduction.
.. [#] Larry Marple.
A New Autoregressive Spectrum Analysis Algorithm.
           IEEE Transactions on Acoustics, Speech, and Signal Processing
vol 28, no. 4, 1980.
Parameters
----------
y : np.ndarray
Time series to fit
order : int > 0
Order of the linear filter
Returns
-------
a : np.ndarray of length ``order + 1``
LP prediction error coefficients, i.e. filter denominator polynomial
Raises
------
ParameterError
- If ``y`` is not valid audio as per `librosa.util.valid_audio`
- If ``order < 1`` or not integer
FloatingPointError
- If ``y`` is ill-conditioned
See also
--------
scipy.signal.lfilter
Examples
--------
Compute LP coefficients of y at order 16 on entire series
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> librosa.lpc(y, 16)
Compute LP coefficients, and plot LP estimate of original series
>>> import matplotlib.pyplot as plt
>>> import scipy
>>> y, sr = librosa.load(librosa.ex('trumpet'), duration=0.020)
>>> a = librosa.lpc(y, 2)
>>> b = np.hstack([[0], -1 * a[1:]])
>>> y_hat = scipy.signal.lfilter(b, [1], y)
>>> fig, ax = plt.subplots()
>>> ax.plot(y)
>>> ax.plot(y_hat, linestyle='--')
>>> ax.legend(['y', 'y_hat'])
>>> ax.set_title('LP Model Forward Prediction')
"""
if not isinstance(order, (int, np.integer)) or order < 1:
raise ParameterError("order must be an integer > 0")
util.valid_audio(y, mono=True)
return __lpc(y, order)
@jit(nopython=True)
def __lpc(y, order):
# This implementation follows the description of Burg's algorithm given in
# section III of Marple's paper referenced in the docstring.
#
# We use the Levinson-Durbin recursion to compute AR coefficients for each
# increasing model order by using those from the last. We maintain two
# arrays and then flip them each time we increase the model order so that
# we may use all the coefficients from the previous order while we compute
# those for the new one. These two arrays hold ar_coeffs for order M and
# order M-1. (Corresponding to a_{M,k} and a_{M-1,k} in eqn 5)
dtype = y.dtype.type
ar_coeffs = np.zeros(order + 1, dtype=dtype)
ar_coeffs[0] = dtype(1)
ar_coeffs_prev = np.zeros(order + 1, dtype=dtype)
ar_coeffs_prev[0] = dtype(1)
# These two arrays hold the forward and backward prediction error. They
# correspond to f_{M-1,k} and b_{M-1,k} in eqns 10, 11, 13 and 14 of
# Marple. First they are used to compute the reflection coefficient at
# order M from M-1 then are re-used as f_{M,k} and b_{M,k} for each
# iteration of the below loop
fwd_pred_error = y[1:]
bwd_pred_error = y[:-1]
# DEN_{M} from eqn 16 of Marple.
den = np.dot(fwd_pred_error, fwd_pred_error) + np.dot(
bwd_pred_error, bwd_pred_error
)
for i in range(order):
if den <= 0:
raise FloatingPointError("numerical error, input ill-conditioned?")
# Eqn 15 of Marple, with fwd_pred_error and bwd_pred_error
# corresponding to f_{M-1,k+1} and b{M-1,k} and the result as a_{M,M}
# reflect_coeff = dtype(-2) * np.dot(bwd_pred_error, fwd_pred_error) / dtype(den)
reflect_coeff = dtype(-2) * np.dot(bwd_pred_error, fwd_pred_error) / dtype(den)
# Now we use the reflection coefficient and the AR coefficients from
# the last model order to compute all of the AR coefficients for the
# current one. This is the Levinson-Durbin recursion described in
# eqn 5.
# Note 1: We don't have to care about complex conjugates as our signals
# are all real-valued
# Note 2: j counts 1..order+1, i-j+1 counts order..0
# Note 3: The first element of ar_coeffs* is always 1, which copies in
# the reflection coefficient at the end of the new AR coefficient array
# after the preceding coefficients
ar_coeffs_prev, ar_coeffs = ar_coeffs, ar_coeffs_prev
for j in range(1, i + 2):
ar_coeffs[j] = ar_coeffs_prev[j] + reflect_coeff * ar_coeffs_prev[i - j + 1]
# Update the forward and backward prediction errors corresponding to
# eqns 13 and 14. We start with f_{M-1,k+1} and b_{M-1,k} and use them
# to compute f_{M,k} and b_{M,k}
fwd_pred_error_tmp = fwd_pred_error
fwd_pred_error = fwd_pred_error + reflect_coeff * bwd_pred_error
bwd_pred_error = bwd_pred_error + reflect_coeff * fwd_pred_error_tmp
# SNIP - we are now done with order M and advance. M-1 <- M
# Compute DEN_{M} using the recursion from eqn 17.
#
# reflect_coeff = a_{M-1,M-1} (we have advanced M)
# den = DEN_{M-1} (rhs)
# bwd_pred_error = b_{M-1,N-M+1} (we have advanced M)
# fwd_pred_error = f_{M-1,k} (we have advanced M)
# den <- DEN_{M} (lhs)
#
q = dtype(1) - reflect_coeff ** 2
den = q * den - bwd_pred_error[-1] ** 2 - fwd_pred_error[0] ** 2
# Shift up forward error.
#
# fwd_pred_error <- f_{M-1,k+1}
# bwd_pred_error <- b_{M-1,k}
#
# N.B. We do this after computing the denominator using eqn 17 but
# before using it in the numerator in eqn 15.
fwd_pred_error = fwd_pred_error[1:]
bwd_pred_error = bwd_pred_error[:-1]
return ar_coeffs
@cache(level=20)
def zero_crossings(
y, threshold=1e-10, ref_magnitude=None, pad=True, zero_pos=True, axis=-1
):
"""Find the zero-crossings of a signal ``y``: indices ``i`` such that
``sign(y[i]) != sign(y[j])``.
If ``y`` is multi-dimensional, then zero-crossings are computed along
the specified ``axis``.
Parameters
----------
y : np.ndarray
The input array
threshold : float > 0 or None
If specified, values where ``-threshold <= y <= threshold`` are
clipped to 0.
ref_magnitude : float > 0 or callable
If numeric, the threshold is scaled relative to ``ref_magnitude``.
If callable, the threshold is scaled relative to
``ref_magnitude(np.abs(y))``.
pad : boolean
If ``True``, then ``y[0]`` is considered a valid zero-crossing.
zero_pos : boolean
If ``True`` then the value 0 is interpreted as having positive sign.
If ``False``, then 0, -1, and +1 all have distinct signs.
axis : int
Axis along which to compute zero-crossings.
Returns
-------
zero_crossings : np.ndarray [shape=y.shape, dtype=boolean]
Indicator array of zero-crossings in ``y`` along the selected axis.
Notes
-----
This function caches at level 20.
Examples
--------
>>> # Generate a time-series
>>> y = np.sin(np.linspace(0, 4 * 2 * np.pi, 20))
>>> y
array([ 0.000e+00, 9.694e-01, 4.759e-01, -7.357e-01,
-8.372e-01, 3.247e-01, 9.966e-01, 1.646e-01,
-9.158e-01, -6.142e-01, 6.142e-01, 9.158e-01,
-1.646e-01, -9.966e-01, -3.247e-01, 8.372e-01,
7.357e-01, -4.759e-01, -9.694e-01, -9.797e-16])
>>> # Compute zero-crossings
>>> z = librosa.zero_crossings(y)
>>> z
array([ True, False, False, True, False, True, False, False,
True, False, True, False, True, False, False, True,
False, True, False, True], dtype=bool)
>>> # Stack y against the zero-crossing indicator
>>> librosa.util.stack([y, z], axis=-1)
array([[ 0.000e+00, 1.000e+00],
[ 9.694e-01, 0.000e+00],
[ 4.759e-01, 0.000e+00],
[ -7.357e-01, 1.000e+00],
[ -8.372e-01, 0.000e+00],
[ 3.247e-01, 1.000e+00],
[ 9.966e-01, 0.000e+00],
[ 1.646e-01, 0.000e+00],
[ -9.158e-01, 1.000e+00],
[ -6.142e-01, 0.000e+00],
[ 6.142e-01, 1.000e+00],
[ 9.158e-01, 0.000e+00],
[ -1.646e-01, 1.000e+00],
[ -9.966e-01, 0.000e+00],
[ -3.247e-01, 0.000e+00],
[ 8.372e-01, 1.000e+00],
[ 7.357e-01, 0.000e+00],
[ -4.759e-01, 1.000e+00],
[ -9.694e-01, 0.000e+00],
[ -9.797e-16, 1.000e+00]])
>>> # Find the indices of zero-crossings
>>> np.nonzero(z)
(array([ 0, 3, 5, 8, 10, 12, 15, 17, 19]),)
"""
# Clip within the threshold
if threshold is None:
threshold = 0.0
if callable(ref_magnitude):
threshold = threshold * ref_magnitude(np.abs(y))
elif ref_magnitude is not None:
threshold = threshold * ref_magnitude
if threshold > 0:
y = y.copy()
y[np.abs(y) <= threshold] = 0
# Extract the sign bit
if zero_pos:
y_sign = np.signbit(y)
else:
y_sign = np.sign(y)
# Find the change-points by slicing
slice_pre = [slice(None)] * y.ndim
slice_pre[axis] = slice(1, None)
slice_post = [slice(None)] * y.ndim
slice_post[axis] = slice(-1)
# Since we've offset the input by one, pad back onto the front
padding = [(0, 0)] * y.ndim
padding[axis] = (1, 0)
return np.pad(
(y_sign[tuple(slice_post)] != y_sign[tuple(slice_pre)]),
padding,
mode="constant",
constant_values=pad,
)
def clicks(
times=None,
frames=None,
sr=22050,
hop_length=512,
click_freq=1000.0,
click_duration=0.1,
click=None,
length=None,
):
"""Construct a "click track".
This returns a signal with the signal ``click`` sound placed at
each specified time.
Parameters
----------
times : np.ndarray or None
times to place clicks, in seconds
frames : np.ndarray or None
frame indices to place clicks
sr : number > 0
desired sampling rate of the output signal
hop_length : int > 0
if positions are specified by ``frames``, the number of samples between frames.
click_freq : float > 0
frequency (in Hz) of the default click signal. Default is 1KHz.
click_duration : float > 0
duration (in seconds) of the default click signal. Default is 100ms.
click : np.ndarray or None
optional click signal sample to use instead of the default click.
length : int > 0
desired number of samples in the output signal
Returns
-------
click_signal : np.ndarray
Synthesized click signal
Raises
------
ParameterError
- If neither ``times`` nor ``frames`` are provided.
- If any of ``click_freq``, ``click_duration``, or ``length`` are out of range.
Examples
--------
>>> # Sonify detected beat events
>>> y, sr = librosa.load(librosa.ex('choice'), duration=10)
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> y_beats = librosa.clicks(frames=beats, sr=sr)
>>> # Or generate a signal of the same length as y
>>> y_beats = librosa.clicks(frames=beats, sr=sr, length=len(y))
>>> # Or use timing instead of frame indices
>>> times = librosa.frames_to_time(beats, sr=sr)
>>> y_beat_times = librosa.clicks(times=times, sr=sr)
>>> # Or with a click frequency of 880Hz and a 500ms sample
>>> y_beat_times880 = librosa.clicks(times=times, sr=sr,
... click_freq=880, click_duration=0.5)
Display click waveform next to the spectrogram
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(nrows=2, sharex=True)
>>> S = librosa.feature.melspectrogram(y=y, sr=sr)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel', ax=ax[0])
>>> librosa.display.waveshow(y_beat_times, sr=sr, label='Beat clicks',
... ax=ax[1])
>>> ax[1].legend()
>>> ax[0].label_outer()
>>> ax[0].set_title(None)
"""
# Compute sample positions from time or frames
if times is None:
if frames is None:
raise ParameterError('either "times" or "frames" must be provided')
positions = frames_to_samples(frames, hop_length=hop_length)
else:
# Convert times to positions
positions = time_to_samples(times, sr=sr)
if click is not None:
# Check that we have a well-formed audio buffer
util.valid_audio(click, mono=True)
else:
# Create default click signal
if click_duration <= 0:
raise ParameterError("click_duration must be strictly positive")
if click_freq <= 0:
raise ParameterError("click_freq must be strictly positive")
angular_freq = 2 * np.pi * click_freq / float(sr)
click = np.logspace(0, -10, num=int(np.round(sr * click_duration)), base=2.0)
click *= np.sin(angular_freq * np.arange(len(click)))
# Set default length
if length is None:
length = positions.max() + click.shape[0]
else:
if length < 1:
raise ParameterError("length must be a positive integer")
# Filter out any positions past the length boundary
positions = positions[positions < length]
# Pre-allocate click signal
click_signal = np.zeros(length, dtype=np.float32)
# Place clicks
for start in positions:
# Compute the end-point of this click
end = start + click.shape[0]
if end >= length:
click_signal[start:] += click[: length - start]
else:
# Normally, just add a click here
click_signal[start:end] += click
return click_signal
def tone(frequency, sr=22050, length=None, duration=None, phi=None):
"""Construct a pure tone (cosine) signal at a given frequency.
Parameters
----------
frequency : float > 0
frequency
sr : number > 0
desired sampling rate of the output signal
length : int > 0
desired number of samples in the output signal.
When both ``duration`` and ``length`` are defined,
``length`` takes priority.
duration : float > 0
desired duration in seconds.
When both ``duration`` and ``length`` are defined,
``length`` takes priority.
phi : float or None
phase offset, in radians. If unspecified, defaults to ``-np.pi * 0.5``.
Returns
-------
tone_signal : np.ndarray [shape=(length,), dtype=float64]
Synthesized pure sine tone signal
Raises
------
ParameterError
- If ``frequency`` is not provided.
- If neither ``length`` nor ``duration`` are provided.
Examples
--------
Generate a pure sine tone A4
>>> tone = librosa.tone(440, duration=1)
Or generate the same signal using `length`
>>> tone = librosa.tone(440, sr=22050, length=22050)
Display spectrogram
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> S = librosa.feature.melspectrogram(y=tone)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel', ax=ax)
"""
if frequency is None:
raise ParameterError('"frequency" must be provided')
# Compute signal length
if length is None:
if duration is None:
raise ParameterError('either "length" or "duration" must be provided')
length = duration * sr
if phi is None:
phi = -np.pi * 0.5
return np.cos(2 * np.pi * frequency * np.arange(length) / sr + phi)
def chirp(fmin, fmax, sr=22050, length=None, duration=None, linear=False, phi=None):
"""Construct a "chirp" or "sine-sweep" signal.
The chirp sweeps from frequency ``fmin`` to ``fmax`` (in Hz).
Parameters
----------
fmin : float > 0
initial frequency
fmax : float > 0
final frequency
sr : number > 0
desired sampling rate of the output signal
length : int > 0
desired number of samples in the output signal.
When both ``duration`` and ``length`` are defined,
``length`` takes priority.
duration : float > 0
desired duration in seconds.
When both ``duration`` and ``length`` are defined,
``length`` takes priority.
linear : boolean
- If ``True``, use a linear sweep, i.e., frequency changes linearly with time
        - If ``False``, use an exponential sweep.
Default is ``False``.
phi : float or None
phase offset, in radians.
If unspecified, defaults to ``-np.pi * 0.5``.
Returns
-------
chirp_signal : np.ndarray [shape=(length,), dtype=float64]
Synthesized chirp signal
Raises
------
ParameterError
- If either ``fmin`` or ``fmax`` are not provided.
- If neither ``length`` nor ``duration`` are provided.
See Also
--------
scipy.signal.chirp
Examples
--------
    Generate an exponential chirp from A2 to A8
>>> exponential_chirp = librosa.chirp(110, 110*64, duration=1)
Or generate the same signal using ``length``
>>> exponential_chirp = librosa.chirp(110, 110*64, sr=22050, length=22050)
Or generate a linear chirp instead
>>> linear_chirp = librosa.chirp(110, 110*64, duration=1, linear=True)
Display spectrogram for both exponential and linear chirps.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
>>> S_exponential = np.abs(librosa.stft(y=exponential_chirp))
>>> librosa.display.specshow(librosa.amplitude_to_db(S_exponential, ref=np.max),
... x_axis='time', y_axis='linear', ax=ax[0])
>>> ax[0].set(title='Exponential chirp', xlabel=None)
>>> ax[0].label_outer()
>>> S_linear = np.abs(librosa.stft(y=linear_chirp))
>>> librosa.display.specshow(librosa.amplitude_to_db(S_linear, ref=np.max),
... x_axis='time', y_axis='linear', ax=ax[1])
>>> ax[1].set(title='Linear chirp')
"""
if fmin is None or fmax is None:
raise ParameterError('both "fmin" and "fmax" must be provided')
# Compute signal duration
period = 1.0 / sr
if length is None:
if duration is None:
raise ParameterError('either "length" or "duration" must be provided')
else:
duration = period * length
if phi is None:
phi = -np.pi * 0.5
method = "linear" if linear else "logarithmic"
return scipy.signal.chirp(
np.arange(duration, step=period),
fmin,
duration,
fmax,
method=method,
phi=phi / np.pi * 180, # scipy.signal.chirp uses degrees for phase offset
)
def mu_compress(x, mu=255, quantize=True):
"""mu-law compression
Given an input signal ``-1 <= x <= 1``, the mu-law compression
is calculated by::
sign(x) * ln(1 + mu * abs(x)) / ln(1 + mu)
Parameters
----------
x : np.ndarray with values in [-1, +1]
The input signal to compress
mu : positive number
The compression parameter. Values of the form ``2**n - 1``
(e.g., 15, 31, 63, etc.) are most common.
quantize : bool
If ``True``, quantize the compressed values into ``1 + mu``
distinct integer values.
If ``False``, mu-law compression is applied without quantization.
Returns
-------
x_compressed : np.ndarray
The compressed signal.
Raises
------
ParameterError
If ``x`` has values outside the range [-1, +1]
If ``mu <= 0``
See Also
--------
mu_expand
Examples
--------
Compression without quantization
>>> x = np.linspace(-1, 1, num=16)
>>> x
array([-1. , -0.86666667, -0.73333333, -0.6 , -0.46666667,
-0.33333333, -0.2 , -0.06666667, 0.06666667, 0.2 ,
0.33333333, 0.46666667, 0.6 , 0.73333333, 0.86666667,
1. ])
>>> y = librosa.mu_compress(x, quantize=False)
>>> y
array([-1. , -0.97430198, -0.94432361, -0.90834832, -0.86336132,
-0.80328309, -0.71255496, -0.52124063, 0.52124063, 0.71255496,
0.80328309, 0.86336132, 0.90834832, 0.94432361, 0.97430198,
1. ])
Compression with quantization
>>> y = librosa.mu_compress(x, quantize=True)
>>> y
array([-128, -124, -120, -116, -110, -102, -91, -66, 66, 91, 102,
110, 116, 120, 124, 127])
Compression with quantization and a smaller range
>>> y = librosa.mu_compress(x, mu=15, quantize=True)
>>> y
array([-8, -7, -7, -6, -6, -5, -4, -2, 2, 4, 5, 6, 6, 7, 7, 7])
"""
if mu <= 0:
raise ParameterError(
"mu-law compression parameter mu={} "
"must be strictly positive.".format(mu)
)
if np.any(x < -1) or np.any(x > 1):
raise ParameterError(
"mu-law input x={} must be in the " "range [-1, +1].".format(x)
)
x_comp = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
if quantize:
return (
np.digitize(
x_comp, np.linspace(-1, 1, num=int(1 + mu), endpoint=True), right=True
)
- int(mu + 1) // 2
)
return x_comp
def mu_expand(x, mu=255.0, quantize=True):
"""mu-law expansion
This function is the inverse of ``mu_compress``. Given a mu-law compressed
signal ``-1 <= x <= 1``, the mu-law expansion is calculated by::
sign(x) * (1 / mu) * ((1 + mu)**abs(x) - 1)
Parameters
----------
x : np.ndarray
The compressed signal.
If ``quantize=True``, values must be in the range [-1, +1].
mu : positive number
The compression parameter. Values of the form ``2**n - 1``
(e.g., 15, 31, 63, etc.) are most common.
quantize : boolean
If ``True``, the input is assumed to be quantized to
``1 + mu`` distinct integer values.
Returns
-------
x_expanded : np.ndarray with values in the range [-1, +1]
The mu-law expanded signal.
Raises
------
ParameterError
If ``x`` has values outside the range [-1, +1] and ``quantize=False``
If ``mu <= 0``
See Also
--------
mu_compress
Examples
--------
Compress and expand without quantization
>>> x = np.linspace(-1, 1, num=16)
>>> x
array([-1. , -0.86666667, -0.73333333, -0.6 , -0.46666667,
-0.33333333, -0.2 , -0.06666667, 0.06666667, 0.2 ,
0.33333333, 0.46666667, 0.6 , 0.73333333, 0.86666667,
1. ])
>>> y = librosa.mu_compress(x, quantize=False)
>>> y
array([-1. , -0.97430198, -0.94432361, -0.90834832, -0.86336132,
-0.80328309, -0.71255496, -0.52124063, 0.52124063, 0.71255496,
0.80328309, 0.86336132, 0.90834832, 0.94432361, 0.97430198,
1. ])
>>> z = librosa.mu_expand(y, quantize=False)
>>> z
array([-1. , -0.86666667, -0.73333333, -0.6 , -0.46666667,
-0.33333333, -0.2 , -0.06666667, 0.06666667, 0.2 ,
0.33333333, 0.46666667, 0.6 , 0.73333333, 0.86666667,
1. ])
Compress and expand with quantization. Note that this necessarily
incurs quantization error, particularly for values near +-1.
>>> y = librosa.mu_compress(x, quantize=True)
>>> y
array([-128, -124, -120, -116, -110, -102, -91, -66, 66, 91, 102,
110, 116, 120, 124, 127])
>>> z = librosa.mu_expand(y, quantize=True)
    >>> z
    array([-1.        , -0.84027248, -0.70595818, -0.59301377, -0.4563785 ,
-0.32155973, -0.19817918, -0.06450245, 0.06450245, 0.19817918,
0.32155973, 0.4563785 , 0.59301377, 0.70595818, 0.84027248,
0.95743702])
"""
if mu <= 0:
raise ParameterError(
"Inverse mu-law compression parameter "
"mu={} must be strictly positive.".format(mu)
)
if quantize:
x = x * 2.0 / (1 + mu)
if np.any(x < -1) or np.any(x > 1):
raise ParameterError(
"Inverse mu-law input x={} must be " "in the range [-1, +1].".format(x)
)
return np.sign(x) / mu * (np.power(1 + mu, np.abs(x)) - 1)
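# ---------------------------------------------------------------------------
# Hedged sanity-check sketch (not part of librosa): a quick round trip of the
# mu-law pair defined above. Because this module uses relative imports, it only
# runs from within the package (e.g. ``python -m librosa.core.audio``), so
# treat it as illustrative rather than as part of the public API.
if __name__ == "__main__":
    _x = np.linspace(-1, 1, num=9)
    _y = mu_compress(_x, mu=255, quantize=False)
    # Without quantization, expansion should invert compression exactly.
    print(np.allclose(mu_expand(_y, mu=255, quantize=False), _x))  # True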
| isc |
Eric89GXL/mne-python | mne/viz/_brain/colormap.py | 10 | 6336 | # Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Oleh Kozynets <[email protected]>
# Guillaume Favelier <[email protected]>
#
# License: Simplified BSD
import numpy as np
def create_lut(cmap, n_colors=256, center=None):
"""Return a colormap suitable for setting as a LUT."""
from .._3d import _get_cmap
assert not (isinstance(cmap, str) and cmap == 'auto')
cmap = _get_cmap(cmap)
lut = np.round(cmap(np.linspace(0, 1, n_colors)) * 255.0).astype(np.int64)
return lut
def scale_sequential_lut(lut_table, fmin, fmid, fmax):
"""Scale a sequential colormap."""
assert fmin <= fmid <= fmax # guaranteed by calculate_lut
lut_table_new = lut_table.copy()
n_colors = lut_table.shape[0]
n_colors2 = n_colors // 2
if fmax == fmin:
fmid_idx = 0
else:
fmid_idx = np.clip(int(np.round(
n_colors * ((fmid - fmin) / (fmax - fmin))) - 1), 0, n_colors - 2)
n_left = fmid_idx + 1
n_right = n_colors - n_left
for i in range(4):
lut_table_new[:fmid_idx + 1, i] = np.interp(
np.linspace(0, n_colors2 - 1, n_left),
np.arange(n_colors), lut_table[:, i])
lut_table_new[fmid_idx + 1:, i] = np.interp(
np.linspace(n_colors - 1, n_colors2, n_right)[::-1],
np.arange(n_colors), lut_table[:, i])
return lut_table_new
def get_fill_colors(cols, n_fill):
"""Get the fill colors for the middle of divergent colormaps."""
steps = np.linalg.norm(np.diff(cols[:, :3].astype(float), axis=0),
axis=1)
ind = np.flatnonzero(steps[1:-1] > steps[[0, -1]].mean() * 3)
if ind.size > 0:
# choose the two colors between which there is the large step
ind = ind[0] + 1
        fillcols = np.r_[np.tile(cols[ind, :], (n_fill // 2, 1)),
                         np.tile(cols[ind + 1, :],
                                 (n_fill - n_fill // 2, 1))]
else:
# choose a color from the middle of the colormap
fillcols = np.tile(cols[int(cols.shape[0] / 2), :], (n_fill, 1))
return fillcols
def calculate_lut(lut_table, alpha, fmin, fmid, fmax, center=None,
transparent=True):
u"""Transparent color map calculation.
A colormap may be sequential or divergent. When the colormap is
divergent indicate this by providing a value for 'center'. The
meanings of fmin, fmid and fmax are different for sequential and
divergent colormaps. A sequential colormap is characterised by::
[fmin, fmid, fmax]
where fmin and fmax define the edges of the colormap and fmid
will be the value mapped to the center of the originally chosen colormap.
A divergent colormap is characterised by::
[center-fmax, center-fmid, center-fmin, center,
center+fmin, center+fmid, center+fmax]
i.e., values between center-fmin and center+fmin will not be shown
while center-fmid will map to the fmid of the first half of the
    original colormap and center+fmid to the fmid of the second half.
Parameters
----------
    lut_table : Colormap | str
        Color map (or its name) obtained from _process_mapdata.
alpha : float
Alpha value to apply globally to the overlay. Has no effect with mpl
backend.
fmin : float
Min value in colormap.
fmid : float
Intermediate value in colormap.
fmax : float
Max value in colormap.
center : float or None
If not None, center of a divergent colormap, changes the meaning of
fmin, fmax and fmid.
transparent : boolean
if True: use a linear transparency between fmin and fmid and make
values below fmin fully transparent (symmetrically for divergent
colormaps)
Returns
-------
    lut_table : numpy.ndarray, shape (256, 4)
        RGBA color table with a transparency channel, scaled to [0, 1].
"""
if not fmin <= fmid <= fmax:
raise ValueError('Must have fmin (%s) <= fmid (%s) <= fmax (%s)'
% (fmin, fmid, fmax))
lut_table = create_lut(lut_table)
assert lut_table.dtype.kind == 'i'
divergent = center is not None
n_colors = lut_table.shape[0]
# Add transparency if needed
n_colors2 = n_colors // 2
if transparent:
if divergent:
N4 = np.full(4, n_colors // 4)
N4[[0, 3, 1, 2][:np.mod(n_colors, 4)]] += 1
assert N4.sum() == n_colors
lut_table[:, -1] = np.round(np.hstack([
np.full(N4[0], 255.),
np.linspace(0, 255, N4[1])[::-1],
np.linspace(0, 255, N4[2]),
np.full(N4[3], 255.)]))
else:
lut_table[:n_colors2, -1] = np.round(np.linspace(
0, 255, n_colors2))
lut_table[n_colors2:, -1] = 255
alpha = float(alpha)
if alpha < 1.0:
lut_table[:, -1] = np.round(lut_table[:, -1] * alpha)
if divergent:
if fmax == fmin:
lut_table = np.r_[
lut_table[:1],
get_fill_colors(
lut_table[n_colors2 - 3:n_colors2 + 3, :], n_colors - 2),
lut_table[-1:]]
else:
n_fill = int(round(fmin * n_colors2 / (fmax - fmin))) * 2
lut_table = np.r_[
scale_sequential_lut(lut_table[:n_colors2, :],
center - fmax, center - fmid,
center - fmin),
get_fill_colors(
lut_table[n_colors2 - 3:n_colors2 + 3, :], n_fill),
scale_sequential_lut(lut_table[n_colors2:, :][::-1],
center - fmax, center - fmid,
center - fmin)[::-1]]
else:
lut_table = scale_sequential_lut(lut_table, fmin, fmid, fmax)
n_colors = lut_table.shape[0]
if n_colors != 256:
lut = np.zeros((256, 4))
x = np.linspace(1, n_colors, 256)
for chan in range(4):
lut[:, chan] = np.interp(x,
np.arange(1, n_colors + 1),
lut_table[:, chan])
lut_table = lut
lut_table = lut_table.astype(np.float64) / 255.0
return lut_table
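# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): building a transparent
# LUT for a sequential colormap. The relative imports above mean this only runs
# from within the mne package (e.g. ``python -m mne.viz._brain.colormap``), so
# treat it as illustrative.
if __name__ == "__main__":
    _lut = calculate_lut("hot", alpha=1.0, fmin=2.0, fmid=5.0, fmax=10.0,
                         transparent=True)
    print(_lut.shape)  # expected: (256, 4) RGBA rows scaled to [0, 1]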
| bsd-3-clause |
cha007/vigra | vigranumpy/examples/non_local_mean_2d_color.py | 10 | 1407 | import vigra
from vigra import numpy
from matplotlib import pylab
from time import time
import multiprocessing
path = "69015.jpg"
#path = "12074.jpg"
path = "100075.jpg"
path = "12003.jpg"
data = vigra.impex.readImage(path).astype(numpy.float32)
cpus = multiprocessing.cpu_count()
print "nCpus",cpus
t0 =time()
#for c in range(3):
# cimg=data[:,:,c]
# cimg-=cimg.min()
# cimg/=cimg.max()
iters = 10
#policy = vigra.filters.RatioPolicy(sigma=10.0, meanRatio=0.95, varRatio=0.5)
policy = vigra.filters.NormPolicy(sigma=50.0, meanDist=50, varRatio=0.5)
#data-=100.0
res = vigra.filters.nonLocalMean2d(data,policy=policy,searchRadius=5,patchRadius=1,nThreads=cpus+1,stepSize=2,verbose=True,sigmaMean=10.0)
for i in range(iters-1):
res = vigra.filters.nonLocalMean2d(res,policy=policy,searchRadius=5,patchRadius=2,nThreads=cpus+1,stepSize=2,verbose=True,sigmaMean=10.0)
t1 = time()
res = vigra.taggedView(res,'xyc')
gma = vigra.filters.gaussianGradientMagnitude(res,4.0)
gmb = vigra.filters.gaussianGradientMagnitude(data,4.0)
#data+=100.0
print(t1 - t0)
imgs = [data,res,gma,gmb]
for img in imgs:
for c in range(img.shape[2]):
cimg=img[:,:,c]
cimg-=cimg.min()
cimg/=cimg.max()
f = pylab.figure()
for n, arr in enumerate(imgs):
arr = arr.squeeze()
    f.add_subplot(1, len(imgs), n + 1)
pylab.imshow(arr.swapaxes(0,1))
pylab.title('denoised')
pylab.show()
| mit |
schreiberx/sweet | benchmarks_sphere/paper_jrn_sl_exp/compare_wt_dt_vs_accuracy_galewsky_M512_6days_l_n_uv/postprocessing_consolidate_prog_div.py | 8 | 6177 | #! /usr/bin/env python3
import sys
import math
from mule.plotting.Plotting import *
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
sys.path.append('../')
import pretty_plotting as pp
sys.path.pop()
mule_plotting_usetex(False)
groups = ['runtime.timestepping_method']
tagnames_y = [
'sphere_data_diff_prog_div.res_norm_l1',
'sphere_data_diff_prog_div.res_norm_l2',
'sphere_data_diff_prog_div.res_norm_linf',
]
j = JobsData('./job_bench_*', verbosity=0)
c = JobsDataConsolidate(j)
print("")
print("Groups:")
job_groups = c.create_groups(groups)
for key, g in job_groups.items():
print(key)
for tagname_y in tagnames_y:
params = []
params += [
{
'tagname_x': 'runtime.timestep_size',
'xlabel': "Timestep size (seconds)",
'ylabel': pp.latex_pretty_names[tagname_y],
'title': 'Timestep size vs. error',
'xscale': 'log',
'yscale': 'log',
'convergence': True,
},
]
params += [
{
'tagname_x': 'output.simulation_benchmark_timings.main_timestepping',
'xlabel': "Wallclock time (seconds)",
'ylabel': pp.latex_pretty_names[tagname_y],
'title': 'Wallclock time vs. error',
'xscale': 'log',
'yscale': 'log',
'convergence': False,
},
]
for param in params:
tagname_x = param['tagname_x']
xlabel = param['xlabel']
ylabel = param['ylabel']
title = param['title']
xscale = param['xscale']
yscale = param['yscale']
convergence = param['convergence']
print("*"*80)
print("Processing tag "+tagname_x)
print("*"*80)
if True:
"""
Plotting format
"""
# Filter out errors beyond this value!
def data_filter(x, y, jobdata):
if y == None:
return True
x = float(x)
y = float(y)
if math.isnan(y):
return True
if 'l1' in tagname_y:
if y > 1e1:
print("Sorting out L1 data "+str(y))
return True
elif 'l2' in tagname_y:
if y > 1e1:
print("Sorting out L2 data "+str(y))
return True
elif 'linf' in tagname_y:
if y > 1e2:
print("Sorting out Linf data "+str(y))
return True
else:
raise Exception("Unknown y tag "+tagname_y)
return False
d = JobsData_GroupsPlottingScattered(
job_groups,
tagname_x,
tagname_y,
data_filter = data_filter
)
fileid = "output_plotting_"+tagname_x.replace('.', '-').replace('_', '-')+"_vs_"+tagname_y.replace('.', '-').replace('_', '-')
if True:
#
# Proper naming and sorting of each label
#
# new data dictionary
data_new = {}
for key, data in d.data.items():
# generate nice tex label
#data['label'] = pp.get_pretty_name(key)
data['label'] = key #pp.get_pretty_name(key)
key_new = pp.get_pretty_name_order(key)+'_'+key
# copy data
data_new[key_new] = copy.copy(data)
# Copy back new data table
d.data = data_new
p = Plotting_ScatteredData()
def fun(p):
from matplotlib import ticker
from matplotlib.ticker import FormatStrFormatter
plt.tick_params(axis='x', which='minor')
p.ax.xaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
p.ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
p.ax.xaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))
for tick in p.ax.xaxis.get_minor_ticks():
tick.label.set_fontsize(8)
plt.tick_params(axis='y', which='minor')
p.ax.yaxis.set_minor_formatter(FormatStrFormatter("%.1e"))
p.ax.yaxis.set_major_formatter(FormatStrFormatter("%.1e"))
p.ax.yaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))
for tick in p.ax.yaxis.get_minor_ticks():
tick.label.set_fontsize(6)
#
# Add convergence information
#
if convergence:
if 'l1' in tagname_y:
ps = [100, 1e-9]
elif 'l2' in tagname_y:
ps = [100, 5e-8]
elif 'linf' in tagname_y:
ps = [100, 1e-7]
else:
ps = [100, 1e-0]
p.add_convergence(2, ps)
annotate_text_template = "{:.1f} / {:.3f}"
p.plot(
data_plotting = d.get_data_float(),
xlabel = xlabel,
ylabel = ylabel,
title = title,
xscale = xscale,
yscale = yscale,
#annotate = True,
#annotate_each_nth_value = 3,
#annotate_fontsize = 6,
#annotate_text_template = annotate_text_template,
legend_fontsize = 8,
grid = True,
outfile = fileid+".pdf",
lambda_fun = fun,
)
print("Data plotting:")
d.print()
d.write(fileid+".csv")
print("Info:")
print(" NaN: Errors in simulations")
print(" None: No data available")
| mit |
murali-munna/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
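# Minimal usage sketch (illustrative values, assumed for this note): each row
# of the returned weight matrix reconstructs X[i] from its neighbors Z[i] and
# sums to 1.
#
#   >>> X = np.array([[0.0, 0.0], [1.0, 0.0]])                  # (n_samples, n_dim)
#   >>> Z = np.array([[[0.0, 1.0], [1.0, 1.0], [-1.0, 0.0]],
#   ...               [[0.0, 0.0], [2.0, 0.0], [1.0, 1.0]]])    # (n_samples, n_neighbors, n_dim)
#   >>> W = barycenter_weights(X, Z)
#   >>> np.allclose(W.sum(axis=1), 1.0)
#   True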
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
        Amount of regularization when solving the least-squares
        problem used to compute the barycenter weights.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
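# Minimal usage sketch (illustrative, assumed data): the returned CSR graph is
# square and each row of barycenter weights sums to 1.
#
#   >>> X = np.random.RandomState(0).rand(10, 3)
#   >>> W = barycenter_kneighbors_graph(X, n_neighbors=3)
#   >>> W.shape
#   (10, 10)
#   >>> np.allclose(W.sum(axis=1), 1.0)
#   True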
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
                   n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add the identity: M = W'W - W' - W + I = (I - W)'(I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
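# Minimal usage sketch for the functional API (illustrative, assumed data):
#
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.rand(100, 3)
#   >>> Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)
#   >>> Y.shape
#   (100, 2)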
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
                   ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
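# Minimal usage sketch for the estimator API (illustrative, assumed data):
#
#   >>> X = np.random.RandomState(0).rand(50, 3)
#   >>> lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
#   >>> lle.fit_transform(X).shape
#   (50, 2)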
| bsd-3-clause |
Frogee/proceduralGenerationPrototyping | ProcGenExample_BSP.py | 1 | 40991 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 23 13:46:42 2015
@author: Ryan McCormick
PROVIDED AS IS WITHOUT WARRANTY OR GUARANTEE THAT IT WILL WORK.
"""
from collections import defaultdict
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import random
from random import randint
from math import sqrt
'''
Classes and functions to procedurally generate a map of rectangular rooms.
The map is generated using the idea of binary space partitioning. It
closely follows the approach found here:
http://pcgbook.com/
http://pcgbook.com/wp-content/uploads/chapter03.pdf
The algorithm we're sort of following is on page 35 of that pdf and is as follows:
-------
1: start with the entire dungeon area (root node of the BSP tree)
2: divide the area along a horizontal or vertical line
3: select one of the two new partition cells
4: if this cell is bigger than the minimal acceptable size:
5: go to step 2 (using this cell as the area to be divided)
6: select the other partition cell, and go to step 4
7: for every partition cell:
8: create a room within the cell by randomly
choosing two points (top left and bottom right)
within its boundaries
9: starting from the lowest layers, draw corridors to connect
rooms in the nodes of the BSP tree with children of the same
parent
10:repeat 9 until the children of the root node are connected
-------
It uses a binary tree to partition the space. Each leaf represents a
box of space in which a "sub area" (i.e., another box) can be placed.
Once the sub areas are placed, corridors (also boxes) are used to connect them.
'''
'''
BoxHelper class to do operations on Boxes. This class doesn't do much
other than contain a function that operates on lists of Boxes.
'''
class BoxHelper(object):
def __init__(self):
self.name = None
'''
Given two lists of Boxes, this returns which two Boxes have the closest
    centers. This is used to prevent very distant Boxes from being connected
if they are chosen from the list at random. As such, this reduces the chance
of really large corridors that travel over existing rooms.
'''
def returnIndicesOfClosestSubAreas(self, boxListFirst, boxListSecond):
listToReturn = [0, 0]
firstListIndex = 0
secondListIndex = 0
centroidDistance = 1000000000 # Arbitrarily large magic number
for boxFromFirstList in boxListFirst:
#Find first box centroid
centerXFirst = boxFromFirstList.origin[0] + (boxFromFirstList.width / 2.0)
centerYFirst = boxFromFirstList.origin[1] + (boxFromFirstList.height / 2.0)
secondListIndex = 0
for boxFromSecondList in boxListSecond:
#Find second box centroid
centerXSecond = boxFromSecondList.origin[0] + (boxFromSecondList.width / 2.0)
centerYSecond = boxFromSecondList.origin[1] + (boxFromSecondList.height / 2.0)
distXSquared = (centerXSecond - centerXFirst) * (centerXSecond - centerXFirst)
distYSquared = (centerYSecond - centerYFirst) * (centerYSecond - centerYFirst)
distance = sqrt(distXSquared + distYSquared)
if (distance < centroidDistance):
centroidDistance = distance
listToReturn = [firstListIndex, secondListIndex]
secondListIndex += 1
firstListIndex += 1
return listToReturn
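# Quick illustrative check (the boxes below are made-up values for this sketch):
# two boxes near the origin are paired in preference to a distant one, which
# is what keeps the generated corridors short.
#
#   >>> helper = BoxHelper()
#   >>> first = [Box((0, 0), 2, 2), Box((50, 50), 2, 2)]
#   >>> second = [Box((3, 0), 2, 2)]
#   >>> helper.returnIndicesOfClosestSubAreas(first, second)
#   [0, 0]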
'''
Box class to contain information to track location of rectangles.
These are stored in a similar manner to the matplotlib Rectangle class
that is used for plotting.
This class also contains some member functions to divide a box into two
equal boxes, and to randomly generate a "sub area" from the given box area.
'''
class Box(object):
def __init__(self, origin = (0,0), width = 0, height = 0):
self.origin = origin
self.height = height
self.width = width
self.area = width * height
def __repr__(self):
printString = "Box:\nOrigin:(" + str(self.origin[0]) + "," + str(self.origin[1]) + ")\n"
printString += "Width: " + str(self.width) + "\tHeight: " + str(self.height) + "\n"
printString += "Area: " + str(self.area)
return printString
def getHeight(self):
return self.height
def setHeight(self, height):
self.height = height
self.area = self.height * self.width
def getOrigin(self):
return self.origin
def setOrigin(self, origin):
self.origin = origin
def getWidth(self):
return self.width
def setWidth(self, width):
self.width = width
self.area = self.height * self.width
def getArea(self):
return self.area
'''
Returns a list of boxes that are a random division of itself in half.
'''
def partitionBox(self):
boxesToReturn = []
divideParallelWithWidth = random.choice([True, False])
        # A few extra conditions to try to avoid getting too unbalanced:
# If very wide, divide parallel with height
if (self.width > 3.0 * self.height): # 3 is a magic number size factor.
divideParallelWithWidth = False
        # If very tall, divide parallel with width
if (self.height > 3.0 * self.width): # 3 is a magic number size factor.
divideParallelWithWidth = True
if (divideParallelWithWidth == True):
print("Dividing along width")
# If first box were (0,0), 20, 50
# Then partition should be:
# (0, 0), 20, 25
# (0, 0 + 25), 20, 25
originalOrigin = self.origin
halfHeight = self.height / 2.0
firstBox = Box((originalOrigin[0], originalOrigin[1]), self.width, halfHeight)
secondBox = Box((originalOrigin[0], originalOrigin[1] + halfHeight), self.width, halfHeight)
boxesToReturn.append(firstBox)
boxesToReturn.append(secondBox)
else:
print("Dividing along height")
# If first box were (0,0), 20, 50
# Then partition should be:
# (0, 0), 10, 50
# (0 + 10, 0), 10, 50
originalOrigin = self.origin
halfWidth = self.width / 2.0
firstBox = Box((originalOrigin[0], originalOrigin[1]), halfWidth, self.height)
secondBox = Box((originalOrigin[0] + halfWidth, originalOrigin[1]), halfWidth, self.height)
boxesToReturn.append(firstBox)
boxesToReturn.append(secondBox)
return boxesToReturn
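    # Quick illustrative check (made-up box for this sketch): the two halves
    # returned by partitionBox cover the original area exactly.
    #
    #   >>> b = Box((0, 0), 20, 50)
    #   >>> halves = b.partitionBox()
    #   >>> halves[0].getArea() + halves[1].getArea() == b.getArea()
    #   True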
'''
Create a sub area within a box. This sub area represents a room, and is a Box itself.
'''
def constructSubArea(self):
#8: create a room within the cell by randomly
# choosing two points (top left and bottom right)
# within its boundaries
        # We're going to randomly choose a point, and a width and a height
# constrained by the size of the current box.
randomWidth = 0
randomHeight = 0
boxToReturn = Box()
MAGICPADDINGNUMBER = 3 # I guess this should be a global parameter or something?
MAGICWIDTHTHRESHOLD = 6 # More magic numbers
MAGICHEIGHTTHRESHOLD = 6 # Even more magic numbers
while (boxToReturn.area < (0.20 * self.area)): # Another magic number for minimum size of the sub area
originalOrigin = self.origin
xLowerBound = int(originalOrigin[0])
xUpperBound = int(originalOrigin[0] + self.width)
yLowerBound = int(originalOrigin[1])
yUpperBound = int(originalOrigin[1] + self.height)
randomOriginX = randint(xLowerBound, xUpperBound)
randomOriginY = randint(yLowerBound, yUpperBound)
widthUpperBound = int((self.width + self.origin[0]) - randomOriginX)
heightUpperBound = int((self.height + self.origin[1]) - randomOriginY)
randomWidth = randint(0, widthUpperBound)
randomHeight = randint(0, heightUpperBound)
boxToReturn.setHeight(randomHeight)
boxToReturn.setWidth(randomWidth)
boxToReturn.setOrigin((randomOriginX, randomOriginY))
# Just to make sure the boxes are away from the wall a bit.
distanceFromRightWall = (self.origin[0] + self.width) - (boxToReturn.origin[0] + boxToReturn.width)
distanceFromLeftWall = (boxToReturn.origin[0] - self.origin[0])
distanceFromTopWall = (self.origin[1] + self.height) - (boxToReturn.origin[1] + boxToReturn.height)
distanceFromBottomWall = (boxToReturn.origin[1] - self.origin[1])
print(distanceFromRightWall, distanceFromLeftWall, distanceFromTopWall, distanceFromBottomWall)
# Perform another round if things aren't quite how we want them.
if (distanceFromRightWall < MAGICPADDINGNUMBER
or distanceFromLeftWall < MAGICPADDINGNUMBER
or distanceFromTopWall < MAGICPADDINGNUMBER
or distanceFromBottomWall < MAGICPADDINGNUMBER
or boxToReturn.getHeight() < MAGICHEIGHTTHRESHOLD
or boxToReturn.getWidth() < MAGICWIDTHTHRESHOLD):
boxToReturn.setHeight(1)
boxToReturn.setWidth(1)
print("The following box:")
print(self)
print("Generated the sub area:")
print(boxToReturn)
return boxToReturn
'''
Generic tree implementation inspired by the following sources:
http://stackoverflow.com/questions/2482602/a-general-tree-implementation-in-python
http://cbio.ufs.ac.za/live_docs/nbn_tut/trees.html
This was my first attempt at implementing a tree data structure and recursion, and it suffers
from a lack of initial design specifications. It morphs from a generic
tree in which a node can have arbitrarily many children, to expecting that
nodes will only have two children for many of the functions to work properly.
Also, the idea for nodes to contain the Box information came relatively late
in the process, so that was haphazardly bolted on near the end (e.g., the class
was renamed to "AreaNode")
'''
class AreaNode(object):
def __init__(self, name, children, box = Box(), parent = "NULL" ):
self.name = name
self.children = children # Dictionary of child nodes, where keys are the name of the child node.
self.box = box
self.subArea = box
self.childrenAreConnected = False
self.connection = Box()
def __repr__(self, level = 0):
nodeString = ("\t" * level) + repr(str(self.name)) + "\n"
for nodeName in self.children:
nodeString += self.children[nodeName].__repr__(level + 1)
return nodeString
def searchNode(self, nodeNameToFind, traversalList = [], traversalLevel = 0, nameWasFound = False):
for nodeName in self.children:
if (nameWasFound != True):
nameWasFound = self.children[nodeName].searchNode(nodeNameToFind, traversalList, traversalLevel + 1, nameWasFound)
if (nodeName == nodeNameToFind):
nameWasFound = True
for level in range(0, traversalLevel + 1):
traversalList.append("FILLER")
traversalList[traversalLevel] = nodeName
return True
if (nameWasFound == True):
if (traversalList[traversalLevel] == "FILLER"):
traversalList[traversalLevel] = nodeName
return True
def deleteNode(self, nodeNameToFind, traversalList = [], traversalLevel = 0, nameWasFound = False):
for nodeName in self.children:
if (nameWasFound != True):
nameWasFound = self.children[nodeName].deleteNode(nodeNameToFind, traversalList, traversalLevel + 1, nameWasFound)
if (nodeName == nodeNameToFind):
print("Found " + nodeName + ". Deleting.")
self.children.pop(nodeName, None)
return True
def addNode(self, nodeNameToFind, nodeNameToAdd, box, traversalLevel = 0, nameWasFound = False):
for nodeName in self.children:
if (nameWasFound != True):
nameWasFound = self.children[nodeName].addNode(nodeNameToFind, nodeNameToAdd, box, traversalLevel + 1, nameWasFound)
if (nodeName == nodeNameToFind):
print("Found " + nodeName + ". Adding " + nodeNameToAdd)
newNode = AreaNode(nodeNameToAdd, defaultdict(AreaNode), box)
self.children[nodeNameToFind].children[nodeNameToAdd] = newNode
return True
def getRectangles(self, rectangleList, color):
for nodeName in self.children:
self.children[nodeName].getRectangles(rectangleList, color)
nodeBox = Rectangle(self.children[nodeName].box.getOrigin(),
self.children[nodeName].box.getWidth(),
self.children[nodeName].box.getHeight(), facecolor=color)
rectangleList.append(nodeBox)
def getSubAreaShapes(self, shapeList):
for nodeName in self.children:
self.children[nodeName].getSubAreaShapes(shapeList)
if (len(self.children) == 0):
nodeBox = self.subArea
shapeList.append(nodeBox)
if (self.childrenAreConnected == True):
shapeList.append(self.connection)
def getSubAreaRectangles(self, rectangleList, boxColor, connectorColor):
for nodeName in self.children:
self.children[nodeName].getSubAreaRectangles(rectangleList, boxColor, connectorColor)
if (len(self.children[nodeName].children) == 0):
nodeBox = Rectangle(self.children[nodeName].subArea.getOrigin(),
self.children[nodeName].subArea.getWidth(),
self.children[nodeName].subArea.getHeight(), facecolor=boxColor)
rectangleList.append(nodeBox)
if (self.childrenAreConnected == True):
nodeBox = Rectangle(self.connection.getOrigin(),
self.connection.getWidth(),
self.connection.getHeight(), facecolor=connectorColor)
rectangleList.append(nodeBox)
def partitionNode(self, nodeNameToFind, partitionNames,
box = Box(), traversalLevel = 0,
nameWasFound = False):
for nodeName in self.children:
if (nameWasFound != True):
nameWasFound = self.children[nodeName].partitionNode(nodeNameToFind, partitionNames, box, traversalLevel + 1, nameWasFound)
if (nodeName == nodeNameToFind):
print("Found " + nodeName + ". Partitioning.")
#First, need to check how many children it has.
if (len(self.children[nodeName].children) == 0):
#Then we need to determine if and how to divide the current box.
#First, find if the box is large enough to partition.
MAGICMINIMUMAREA = 10
print("Area of " + nodeName + " is " + str(self.children[nodeName].box.getArea()) )
if (self.children[nodeName].box.getArea() > MAGICMINIMUMAREA):
boxes = self.children[nodeName].box.partitionBox();
self.addNode(nodeName, partitionNames[0], boxes[0])
self.addNode(nodeName, partitionNames[1], boxes[1])
print(self.children[nodeName])
else:
print("Insufficient area to partition. Not partitioning")
else:
print("Node already has children. Not partitioning.")
return True
def getNodeArea(self, nodeNameToFind, area, traversalLevel = 0, nameWasFound = False):
for nodeName in self.children:
if (nameWasFound != True):
nameWasFound = self.children[nodeName].getNodeArea(nodeNameToFind, area, traversalLevel + 1, nameWasFound)
if (nodeName == nodeNameToFind):
print("Found " + nodeName + ". Returning area of " + str(self.children[nodeName].box.getArea()))
area.append(self.children[nodeName].box.getArea())
return True
def constructSubArea(self):
for nodeName in self.children:
self.children[nodeName].constructSubArea()
print("Constructing sub area for: " + nodeName)
subAreaBox = self.children[nodeName].box.constructSubArea()
self.children[nodeName].subArea = subAreaBox
def resetSubArea(self):
for nodeName in self.children:
self.children[nodeName].resetSubArea()
print("Resetting sub area for: " + nodeName)
self.children[nodeName].subArea = Box()
self.children[nodeName].childrenAreConnected = False
self.children[nodeName].connection = Box()
def getListOfLeafPairs(self, listOfLeafPairs):
tempListOfChildren = []
for nodeName in self.children:
self.children[nodeName].getListOfLeafPairs(listOfLeafPairs)
if (len(self.children[nodeName].children) == 0):
tempListOfChildren.append(nodeName)
if (len(tempListOfChildren) == 2):
print(tempListOfChildren[0] + " and " + tempListOfChildren[1] + " have no children")
listOfLeafPairs.append((tempListOfChildren[0], tempListOfChildren[1]))
'''
One of the major functions, and one of the ugliest.
The purpose is to connect all of the sub areas with boxes, and make sure all rooms are connected somehow.
We model this by providing all parent nodes with connectors. The trivial case is
when two leaf nodes are connected (i.e. two sub areas). When the children are nodes
that themselves have children, we consider all of the sub areas and the connections
as potential candidates for making new connections between two nodes.
The strange list variable, li_subAreasSuccessfullyConnected, is used because
I couldn't figure out how to declare a static variable in Python, and I couldn't
assign an integer or boolean. Evidently, lists are mutable and I could modify it
and have it maintained during recursion.
'''
def connectSubArea(self, li_subAreasSuccessfullyConnected):
tempListOfChildren = []
for nodeName in self.children:
self.children[nodeName].connectSubArea(li_subAreasSuccessfullyConnected)
tempListOfChildren.append(nodeName)
if (len(tempListOfChildren) == 2 and li_subAreasSuccessfullyConnected != [False]):
if (self.childrenAreConnected == False):
print("Adding connection that connects children: " + tempListOfChildren[0] + " and " + tempListOfChildren[1] + " of parent node: " + self.name)
# Obtain a list of boxes for each child.
shapeListFirstChild = []
shapeListSecondChild = []
self.children[tempListOfChildren[0]].getSubAreaShapes(shapeListFirstChild)
self.children[tempListOfChildren[1]].getSubAreaShapes(shapeListSecondChild)
# Generate a potential connection between the two lists
print("Attempting to connect:")
print("Child 1's shapes: ")
print(shapeListFirstChild)
print("With Child 2's shapes: ")
print(shapeListSecondChild)
boxHelper = BoxHelper()
# Start by choosing the boxes that have the closest centers.
indexList = boxHelper.returnIndicesOfClosestSubAreas(shapeListFirstChild, shapeListSecondChild)
choiceFromFirstList = shapeListFirstChild[indexList[0]]
choiceFromSecondList = shapeListSecondChild[indexList[1]]
terminationIterator = 0
while (self.childrenAreConnected == False):
if (terminationIterator > 100):
# When this happens, we probably can't make the connections
# necessary for the given sub areas. If we try to reconstruct
# the sub areas, it will invalidate the previous connections made.
# At this point, we should probably abort and start again.
print("Termination iterator condition met. Setting exit status to false.")
if (len(li_subAreasSuccessfullyConnected) == 0):
li_subAreasSuccessfullyConnected.append(False)
else:
li_subAreasSuccessfullyConnected[0] = False
#raw_input("Press enter to continue")
break
                    # Variable to track whether or not the closest pair of sub areas will work
closestFailed = False
# Find overlaps in the X and Y dimensions
# X borders (min, max) Y borders (min, max)
xMinFirstShape = choiceFromFirstList.origin[0]
xMaxFirstShape = choiceFromFirstList.origin[0] + choiceFromFirstList.width
yMinFirstShape = choiceFromFirstList.origin[1]
yMaxFirstShape = choiceFromFirstList.origin[1] + choiceFromFirstList.height
xMinSecondShape = choiceFromSecondList.origin[0]
xMaxSecondShape = choiceFromSecondList.origin[0] + choiceFromSecondList.width
yMinSecondShape = choiceFromSecondList.origin[1]
yMaxSecondShape = choiceFromSecondList.origin[1] + choiceFromSecondList.height
#Magic variable to determine the size of corridors.
CORRIDORSIZE = 4
print("Attempting to connect:")
print(choiceFromFirstList)
print(choiceFromSecondList)
if (xMinFirstShape >= xMinSecondShape and xMinFirstShape <= xMaxSecondShape):
print("First shape X starts after second, and starts before end of second")
# Need to travel along the Y to connect them. First, find the delimiting X space we can connect.
xConnectorLowerLimit = xMinFirstShape
xConnectorUpperLimit = min(xMaxFirstShape, xMaxSecondShape)
if (xConnectorUpperLimit - xConnectorLowerLimit <= CORRIDORSIZE):
closestFailed = True
terminationIterator += 1
else:
xCenter = randint(xConnectorLowerLimit + (CORRIDORSIZE / 2.0), xConnectorUpperLimit - (CORRIDORSIZE / 2.0))
                            #Find where on the Y axis this needs to be located. Origin needs to be at the minimum maxY, and maximum minY
xOrigin = xCenter -2
yOrigin = min(yMaxFirstShape, yMaxSecondShape)
yWidth = max(yMinFirstShape, yMinSecondShape) - yOrigin
print("The constructed connector will be:")
newConnector = Box((xOrigin, yOrigin), CORRIDORSIZE, yWidth)
print(newConnector)
self.connection = newConnector
self.childrenAreConnected = True
if (li_subAreasSuccessfullyConnected != [False]):
if (len(li_subAreasSuccessfullyConnected) == 0):
li_subAreasSuccessfullyConnected.append(True)
#raw_input("Press enter to continue")
elif (yMinFirstShape >= yMinSecondShape and yMinFirstShape <= yMaxSecondShape):
print("First shape Y starts after second, and starts before end of second")
# Need to travel along the X to connect them. First, find the delimiting Y space we can connect.
yConnectorLowerLimit = yMinFirstShape
yConnectorUpperLimit = min(yMaxFirstShape, yMaxSecondShape)
if (yConnectorUpperLimit - yConnectorLowerLimit <= CORRIDORSIZE):
closestFailed = True
terminationIterator += 1
else:
yCenter = randint(yConnectorLowerLimit + (CORRIDORSIZE / 2.0), yConnectorUpperLimit - (CORRIDORSIZE / 2.0))
#Find where on the X axis this needs to be located. Origin needs to be at the minimum maxX, and maximum minX
xOrigin = min(xMaxFirstShape, xMaxSecondShape)
yOrigin = yCenter - 2
xWidth = max(xMinFirstShape, xMinSecondShape) - xOrigin
print("The constructed connector will be:")
newConnector = Box((xOrigin, yOrigin), xWidth, CORRIDORSIZE)
print(newConnector)
self.connection = newConnector
self.childrenAreConnected = True
if (li_subAreasSuccessfullyConnected != [False]):
if (len(li_subAreasSuccessfullyConnected) == 0):
li_subAreasSuccessfullyConnected.append(True)
#raw_input("Press enter to continue")
elif (xMinSecondShape >= xMinFirstShape and xMinSecondShape <= xMaxFirstShape):
print("Second shape X starts after first, and starts before end of first.")
# Need to travel along the Y to connect them. First, find the delimiting X space we can connect.
xConnectorLowerLimit = xMinSecondShape
xConnectorUpperLimit = min(xMaxFirstShape, xMaxSecondShape)
if (xConnectorUpperLimit - xConnectorLowerLimit <= CORRIDORSIZE):
closestFailed = True
terminationIterator += 1
else:
xCenter = randint(xConnectorLowerLimit + (CORRIDORSIZE / 2.0), xConnectorUpperLimit - (CORRIDORSIZE / 2.0))
                            #Find where on the Y axis this needs to be located. Origin needs to be at the minimum maxY, and maximum minY
xOrigin = xCenter -2
yOrigin = min(yMaxFirstShape, yMaxSecondShape)
yWidth = max(yMinFirstShape, yMinSecondShape) - yOrigin
print("The constructed connector will be:")
newConnector = Box((xOrigin, yOrigin), CORRIDORSIZE, yWidth)
print(newConnector)
self.connection = newConnector
self.childrenAreConnected = True
if (li_subAreasSuccessfullyConnected != [False]):
if (len(li_subAreasSuccessfullyConnected) == 0):
li_subAreasSuccessfullyConnected.append(True)
#raw_input("Press enter to continue")
elif (yMinSecondShape >= yMinFirstShape and yMinSecondShape <= yMaxFirstShape):
print("Second shape Y starts after first, and starts before end of first.")
# Need to travel along the X to connect them. First, find the delimiting Y space we can connect.
yConnectorLowerLimit = yMinSecondShape
yConnectorUpperLimit = min(yMaxFirstShape, yMaxSecondShape)
if (yConnectorUpperLimit - yConnectorLowerLimit <= CORRIDORSIZE):
closestFailed = True
terminationIterator += 1
else:
yCenter = randint(yConnectorLowerLimit + (CORRIDORSIZE / 2.0), yConnectorUpperLimit - (CORRIDORSIZE / 2.0))
#Find where on the X axis this needs to be located. Origin needs to be at the minimum maxX, and maximum minX
xOrigin = min(xMaxFirstShape, xMaxSecondShape)
yOrigin = yCenter - 2
xWidth = max(xMinFirstShape, xMinSecondShape) - xOrigin
print("The constructed connector will be:")
newConnector = Box((xOrigin, yOrigin), xWidth, CORRIDORSIZE)
print(newConnector)
self.connection = newConnector
self.childrenAreConnected = True
if (li_subAreasSuccessfullyConnected != [False]):
if (len(li_subAreasSuccessfullyConnected) == 0):
li_subAreasSuccessfullyConnected.append(True)
#raw_input("Press enter to continue")
else:
print("Unable to make connection between:")
print(choiceFromFirstList)
print(choiceFromSecondList)
closestFailed = True
terminationIterator += 1
# If choosing the closest sub areas fails, we start picking at random.
                    # This should probably be picking the second closest Boxes rather than at random.
if (closestFailed == True):
indexList[0] = randint(0, len(shapeListFirstChild) - 1)
indexList[1] = randint(0, len(shapeListSecondChild) - 1)
choiceFromFirstList = shapeListFirstChild[indexList[0]]
choiceFromSecondList = shapeListSecondChild[indexList[1]]
else:
indexList = boxHelper.returnIndicesOfClosestSubAreas(shapeListFirstChild, shapeListSecondChild)
choiceFromFirstList = shapeListFirstChild[indexList[0]]
choiceFromSecondList = shapeListSecondChild[indexList[1]]
'''
Tree class to contain the root node. This class doesn't do much
other than maintain the abstraction of the tree; it mostly just calls
Node member functions using the root node.
The only unique function is that it can draw the tree, but maybe that
should be refactored into a graphics plotting class.
'''
class AreaTree(object):
def __init__(self, rootNode):
self.rootNode = rootNode
def __repr__(self):
return "Tree structure:\n\n" + self.rootNode.__repr__()
def searchNode(self, nodeNameToFind):
traversalList = []
print("\nInitiating search for: " + str(nodeNameToFind))
if (self.rootNode.name == nodeNameToFind):
print("Root node has value " + nodeNameToFind)
traversalList.append(0)
else:
self.rootNode.searchNode(nodeNameToFind, traversalList, 1, False)
if (len(traversalList) != 0):
traversalList[0] = self.rootNode.name
else:
traversalList = [None]
print("Finished searching")
print("TraversalList:")
print(traversalList)
return traversalList
def deleteNode(self, nodeToDelete):
print("Deleting " + nodeToDelete)
self.rootNode.deleteNode(nodeToDelete)
def addNode(self, nodeParentName, nodeNameToAdd, box):
print("Adding " + nodeNameToAdd + " to node " + nodeParentName)
if (self.rootNode.name == nodeParentName):
newNode = AreaNode(nodeNameToAdd, defaultdict(AreaNode), box)
self.rootNode.children[nodeNameToAdd] = newNode
else:
self.rootNode.addNode(nodeParentName, nodeNameToAdd, box, 1, False)
def partitionNode(self, nodeNameToPartition, partitionNames):
print("Partitioning " + nodeNameToPartition)
if (nodeNameToPartition == self.rootNode.name):
if (len(self.rootNode.children) == 0):
MAGICMINIMUMAREA = 10
print("Area of " + self.rootNode.name + " is " + str(self.rootNode.box.getArea()) )
if (self.rootNode.box.getArea() > MAGICMINIMUMAREA):
boxes = self.rootNode.box.partitionBox();
self.addNode(nodeNameToPartition, partitionNames[0], boxes[0])
self.addNode(nodeNameToPartition, partitionNames[1], boxes[1])
print(self.rootNode.children)
else:
print("Insufficient area to partition. Not partitioning")
else:
print("Node already has children. Not partitioning.")
else:
self.rootNode.partitionNode(nodeNameToPartition, partitionNames, Box(), 1, False)
def getNodeArea(self, nodeNameToFind):
area = [] #For whatever reason I have to pass this as a list for it to be modified.
if (nodeNameToFind == self.rootNode.name):
area.append(self.rootNode.box.area)
else:
self.rootNode.getNodeArea(nodeNameToFind, area)
return area[0]
def constructSubAreas(self):
print("Creating sub areas")
self.rootNode.subArea = self.rootNode.box.constructSubArea()
self.rootNode.constructSubArea()
def resetSubAreas(self):
print("Resetting sub areas")
self.rootNode.subArea = Box()
self.rootNode.childrenAreConnected = False
self.rootNode.connection = Box()
self.rootNode.resetSubArea()
def connectSubAreas(self, li_areasAreConnected):
print("Connecting sub areas")
self.rootNode.connectSubArea(li_areasAreConnected)
def getListOfLeafPairs(self, leafPairList):
print("Getting list of leaf pairs")
self.rootNode.getListOfLeafPairs(leafPairList)
def showAreaTree(self):
fig = plt.figure()
ax = fig.gca() #GCA = get current axes
#This gets plotted, but isn't seen since things are drawn over it.
nodeBox = Rectangle(self.rootNode.box.getOrigin(), self.rootNode.box.getWidth(), self.rootNode.box.getHeight(), facecolor="grey")
ax.add_patch(nodeBox)
ax.set_ylim(self.rootNode.box.getOrigin()[0], self.rootNode.box.getOrigin()[0] + self.rootNode.box.getHeight())
ax.set_xlim(self.rootNode.box.getOrigin()[1], self.rootNode.box.getOrigin()[1] + self.rootNode.box.getWidth())
nodeColor = "blue"
rectangleList = []
self.rootNode.getRectangles(rectangleList, nodeColor)
roomColor = "orange"
corridorColor = "grey"
subAreaRectangleList = []
self.rootNode.getSubAreaRectangles(subAreaRectangleList, roomColor, corridorColor)
for rectangle in reversed(rectangleList): #Need to reverse it so the smaller rectangles get drawn over the larger
ax.add_patch(rectangle)
for rectangle in reversed(subAreaRectangleList):
ax.add_patch(rectangle)
fig.show()
'''
Prototype implementation of the binary space partitioning method
of map construction used here.
http://pcgbook.com/wp-content/uploads/chapter03.pdf
1: start with the entire dungeon area (root node of the BSP tree)
2: divide the area along a horizontal or vertical line
3: select one of the two new partition cells
4: if this cell is bigger than the minimal acceptable size:
5: go to step 2 (using this cell as the area to be divided)
6: select the other partition cell, and go to step 4
7: for every partition cell:
8: create a room within the cell by randomly
choosing two points (top left and bottom right)
within its boundaries
9: starting from the lowest layers, draw corridors to connect
rooms in the nodes of the BSP tree with children of the same
parent
10:repeat 9 until the children of the root node are connected
'''
def generateBSPMap():
# 1: start with the entire area (root node of the BSP tree)
rootNodeBox = Box((0, 0), 256, 256) #The dimensions should be evenly divisible by 2
rootNode = AreaNode("root", defaultdict(AreaNode), rootNodeBox)
tree = AreaTree(rootNode)
firstPartitionNames = ("A", "B")
# 2: divide the area along a horizontal or vertical line
tree.partitionNode("root", firstPartitionNames)
currentArea = rootNodeBox.getArea()
currentPartitionNames = firstPartitionNames
MAGICMINIMUMAREA = (0.03125) * 256 * 256
#MAGICMINIMUMAREA = (0.10) * 256 * 256
while (currentArea > MAGICMINIMUMAREA):
# 3: select one of the two new partition cells
chosenIndex = random.choice([0, 1])
chosenPartition = currentPartitionNames[chosenIndex]
if (chosenIndex == 0):
otherPartition = currentPartitionNames[1]
else:
otherPartition = currentPartitionNames[0]
#4: if this cell is bigger than the minimal acceptable size:
print("Chosen partition " + chosenPartition + " has node area " + str(tree.getNodeArea(chosenPartition)))
if (tree.getNodeArea(chosenPartition) > MAGICMINIMUMAREA):
#5: go to step 2 (using this cell as the area to be divided)
newPartitionNames = (chosenPartition + "_0", chosenPartition + "_1")
tree.partitionNode(chosenPartition, newPartitionNames)
#6: select the other partition cell, and go to step 4
if (tree.getNodeArea(otherPartition) > MAGICMINIMUMAREA):
newPartitionNames = (otherPartition + "_0", otherPartition + "_1")
tree.partitionNode(otherPartition, newPartitionNames)
currentArea = min([tree.getNodeArea(chosenPartition), tree.getNodeArea(otherPartition)])
partitionNameList = []
tree.getListOfLeafPairs(partitionNameList)
currentPartitionNames = random.choice(partitionNameList)
#7: for every partition cell:
#8: create a room within the cell by randomly
# choosing two points (top left and bottom right)
# within its boundaries
li_areasAreConnected = []
terminationIterator = 0
while (li_areasAreConnected == [False] or li_areasAreConnected == []):
tree.resetSubAreas()
tree.constructSubAreas()
#9: starting from the lowest layers, draw corridors to connect
# rooms in the nodes of the BSP tree with children of the same
# parent
#10:repeat 9 until the children of the root node are connected
li_areasAreConnected = []
tree.connectSubAreas(li_areasAreConnected)
terminationIterator += 1
if (terminationIterator > 50):
print("Attempted too many iterations. Terminating.")
print(li_areasAreConnected)
break
if (li_areasAreConnected == [True]):
print(tree)
tree.showAreaTree()
if __name__ == "__main__":
generateBSPMap()
| gpl-2.0 |
shahankhatch/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
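# Minimal usage sketch (illustrative, assumed data):
#
#   >>> import numpy as np
#   >>> from sklearn.cluster import KMeans
#   >>> X = np.array([[1, 1], [1, 2], [10, 10], [10, 11]])
#   >>> km = KMeans(n_clusters=2, random_state=0).fit(X)
#   >>> km.cluster_centers_.shape
#   (2, 2)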
| bsd-3-clause |
myron0330/caching-research | section_cmab/simulation/simu_size_comp.py | 1 | 6966 | # -*- coding: UTF-8 -*-
# **********************************************************************************#
# File:
# **********************************************************************************#
from __future__ import division
import pickle
from collections import OrderedDict
from os import listdir
import pandas as pd
from display_rewards.rewards import display_multiple_
from section_cmab.algorithms import primal_dual_recover, branch_and_bound
from section_cmab.simulation.base import simulate_with_
algorithm_mapper = {
'branch_and_bound': 'B&B',
'primal_dual_recover': 'Proposed algorithm',
'lfu': 'LFU',
'lru': 'LRU'
}
def size_comparison(algorithm, circles=200, dump=True, display=True, algorithm_type='original',
prefix='', **plot_kwargs):
"""
Algorithm comparison
Args:
algorithm(function): algorithm
circles(int): circles
dump(boolean): whether to dump result to file
display(boolean): whether to display
algorithm_type(string): algorithm type
prefix(string): prefix
"""
configs = filter(lambda x: x.startswith('sizes_comparison'), listdir('../etc'))
rewards_dict = dict()
for config in configs:
key = 'Size-{}'.format(config.split('_')[-1]).split('.')[0]
rewards_dict[key] = simulate_with_(algorithm, config=config, circles=circles,
dump=dump, algorithm_type=algorithm_type,
prefix=prefix, fixed_theta=True)
if display:
display_multiple_(rewards_dict, **plot_kwargs)
return rewards_dict
def display_memory_comparison_by_(prefix, **plot_kwargs):
"""
Display memory comparison.
Args:
        prefix(string): prefix string
"""
pks = sorted(filter(lambda x: x.startswith(prefix), listdir('../performance')),
key=lambda x: int(x.split('-')[2]), reverse=True)
rewards_dict = OrderedDict()
for pk in pks:
rewards = pickle.load(open('../performance/{}'.format(pk), 'r+'))
key = 'Memory-{}'.format(pk.split('-')[2])
rewards_dict[key] = rewards
display_multiple_(rewards_dict, **plot_kwargs)
def display_sizes_iteration(prefix, **plot_kwargs):
"""
    Display size comparison across iterations
"""
pks = []
if isinstance(prefix, (str, unicode)):
prefix = [prefix]
for pre in prefix:
pks += sorted(filter(lambda x: x.startswith(pre), listdir('../performance')),
key=lambda x: int(x.split('-')[2]), reverse=False)
rewards_dict = OrderedDict()
x_axis = list()
for pk in sorted(pks, key=lambda x: int(x.split('-')[1])):
rewards = pickle.load(open('../performance/{}'.format(pk), 'r+'))[150:]
frame = pd.DataFrame(rewards).head(100)
key = pk.split('.')[2]
x_axis.append(int(pk.split('-')[1]))
rewards_dict.setdefault(algorithm_mapper[key], list())
rewards_dict[algorithm_mapper[key]].append(frame.sum(axis=1).mean())
results = OrderedDict()
for key in rewards_dict.keys():
results[key] = sorted(rewards_dict[key], reverse=True)
# results = rewards_dict
plot_kwargs['x_axis'] = []
for _ in x_axis:
if _ not in plot_kwargs['x_axis']:
plot_kwargs['x_axis'].append(_)
display_multiple_(results, **plot_kwargs)
def compare_sizes_with_(algorithms, circles=100, dump=False, display=False, prefix='', **plot_parameters):
for current_algorithm in algorithms:
if current_algorithm == branch_and_bound:
size_comparison(algorithm=branch_and_bound, circles=circles, dump=dump,
algorithm_type='optimal', display=display, prefix=prefix,
**plot_parameters)
elif current_algorithm == primal_dual_recover:
size_comparison(algorithm=current_algorithm, circles=circles, dump=dump,
algorithm_type='original', display=display, prefix=prefix,
**plot_parameters)
else:
size_comparison(algorithm=current_algorithm, circles=circles, dump=dump,
algorithm_type='comparison', display=display, prefix=prefix,
**plot_parameters)
def plot_sizes_comparison():
parameters = {
'display_length': 100,
'line_width': 2.5,
'title_size': 20,
'label_size': 18,
'marker': '',
'marker_size': 8,
'x_label': u'',
'y_label': u'',
'with_standardize': True,
'standardize_init': 2,
'standardize_special': False,
'sigma': 0.5,
'legend_size': 15,
'y_min_lim': 200,
'loc': 4,
'texts': [
{
'args': (55, 187, '$K$'),
'kwargs': {
'horizontalalignment': 'center',
'verticalalignment': 'center',
'fontsize': 20,
}
},
{
'args': (4.8, 300, '$\\overline{R}$'),
'kwargs': {
'horizontalalignment': 'center',
'verticalalignment': 'center',
'fontsize': 20,
'rotation': 90,
}
}
],
'save_path': '../plots/sizes_comparison.jpg'
}
display_sizes_iteration(['sizes.rewards.branch_and_bound.fixed.5-',
'sizes.rewards.primal_dual_recover.5-',
'sizes.rewards.lfu.5-',
'sizes.rewards.lru.5-'
], **parameters)
if __name__ == '__main__':
plot_parameters = {
'display_length': 100,
'line_width': 2,
'title_size': 20,
'label_size': 18,
'marker': '',
'marker_size': 8,
'x_label': u'',
'y_label': u'',
'with_standardize': True,
'standardize_init': 10,
'sigma': 1.5,
'save_path': '../plots/sizes_comparison.jpg'
}
# compare_sizes_with_(algorithms=[primal_dual_recover],
# circles=200, dump=True, prefix='sizes', display=False,
# **plot_parameters)
# display_memory_comparison_by_('rewards.primal_dual_recover.4-20-', **plot_parameters)
# display_memory_iteration(['rewards.branch_and_bound.dynamic.4-20-',
# 'rewards.primal_dual_recover.4-20-',
# 'rewards.lfu.4-20-',
# 'rewards.lru.4-20-',
# ], **plot_parameters)
# algorithms = [primal_dual_recover, branch_and_bound, lfu, lru]
# compare_memories_with_(algorithms, circles=100, dump=True, display=False)
# plot_memory_comparison()
# display_sizes_iteration(prefix='sizes.', **plot_parameters)
plot_sizes_comparison()
| mit |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/container.py | 3 | 2832 | import matplotlib.cbook as cbook
class Container(tuple):
"""
Base class for containers.
"""
def __repr__(self):
return "<Container object of %d artists>" % (len(self))
def __new__(cls, *kl, **kwargs):
return tuple.__new__(cls, kl[0])
def __init__(self, kl, label=None):
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self._remove_method = None
self.set_label(label)
def set_remove_method(self, f):
self._remove_method = f
def remove(self):
for c in self:
c.remove()
if self._remove_method:
self._remove_method()
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: any string
"""
self._label = s
self.pchanged()
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
For adding callbacks
"""
try: del self._propobservers[oid]
except KeyError: pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in self._propobservers.items():
func(self)
def get_children(self):
return list(cbook.flatten(self))
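# A minimal, illustrative callback sketch (not part of matplotlib; the observer
# simply records the container whenever a property changes):
#     events = []
#     cont = Container([None], label='demo')
#     oid = cont.add_callback(events.append)   # register an observer, keep its id
#     cont.set_label('updated')                # pchanged() fires -> events == [cont]
#     cont.remove_callback(oid)                # detach the observer again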
class BarContainer(Container):
def __init__(self, patches, errorbar=None, **kwargs):
self.patches = patches
self.errorbar = errorbar
Container.__init__(self, patches, **kwargs)
class ErrorbarContainer(Container):
def __init__(self, lines, has_xerr=False, has_yerr=False, **kwargs):
self.lines = lines
self.has_xerr = has_xerr
self.has_yerr = has_yerr
Container.__init__(self, lines, **kwargs)
class StemContainer(Container):
def __init__(self, markerline_stemlines_baseline, **kwargs):
markerline, stemlines, baseline = markerline_stemlines_baseline
self.markerline = markerline
self.stemlines = stemlines
self.baseline = baseline
Container.__init__(self, markerline_stemlines_baseline, **kwargs)
| gpl-3.0 |
vasyvas/deepdive | util/calibration.py | 18 | 1823 | #! /usr/bin/env python
# Usage: calibration.py [target/calibration_data_file.csv] [output_file.png]
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
CALIBRATION_FILE = sys.argv[1]
OUT_IMG_FILE = sys.argv[2]
labels = []
counts = []
prec = []
counts_train = []
for l in open(CALIBRATION_FILE):
(a,b,c,d,e) = l.rstrip().split('\t')
labels.append((float(a) + float(b))/2)
counts.append(int(c))
if float(d) + float(e) == 0:
prec.append(0.0)
else:
prec.append(float(d)/(float(d) + float(e)))
counts_train.append(float(d)+float(e))
fig, ax = plt.subplots(figsize=(12,3))
MARGIN = 1
fig.subplots_adjust(right=0.99, left=0.05, top=0.9, bottom=0.25)
gs = gridspec.GridSpec(1, 3, width_ratios=[1,1,1])
plt.subplot(gs[0])
width = 0.1
labels_nz = []
prec_nz = []
for i in range(0, len(labels)):
if counts_train[i] != 0:
labels_nz.append(labels[i])
prec_nz.append(prec[i])
plt.plot(labels_nz, prec_nz, 'ro-')
plt.plot([0,1],[0,1],'b--')
plt.title("(a) Accuracy (Testing Set)")
plt.ylabel("Accuracy")
plt.xlabel("Probability")
plt.ylim(0,1)
plt.xlim(0,1.1)
plt.text(0, -0.35 , "* (a) and (b) are produced using 50% held-out on evidence variables; (c) also includes all non-evidence variables of the same relation.", fontsize=10, style='italic')
plt.subplot(gs[1])
width = 0.1
plt.bar(labels, counts_train, width, color='b')
plt.title("(b) # Predictions (Testing Set)")
plt.ylabel("# Predictions")
plt.xlabel("Probability")
plt.xlim(0,1.1)
plt.subplot(gs[2])
width = 0.1
plt.bar(labels, counts, width, color='b')
plt.title("(c) # Predictions (Whole Set)")
plt.ylabel("# Predictions")
plt.xlabel("Probability")
plt.xlim(0,1.1)
plt.savefig(OUT_IMG_FILE)
| apache-2.0 |
huzq/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 22 | 2942 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 40 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import LabelSpreading
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(2)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:340]]
y = digits.target[indices[:340]]
images = digits.images[indices[:340]]
n_total_samples = len(y)
n_labeled_points = 40
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# #############################################################################
# Mask the labels of all points beyond the first n_labeled_points
y_train = np.copy(y)
y_train[unlabeled_set] = -1
# #############################################################################
# Learn with LabelSpreading
lp_model = LabelSpreading(gamma=.25, max_iter=20)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# #############################################################################
# Calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# #############################################################################
# Pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
# #############################################################################
# Plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
Vimos/scikit-learn | examples/covariance/plot_outlier_detection.py | 36 | 5023 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates three
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define the four outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
| bsd-3-clause |
vivekmishra1991/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
mohseniaref/PySAR-1 | pysar/tsviewer.py | 1 | 9048 | #! /usr/bin/env python
############################################################
# Program is part of PySAR v1.0 #
# Copyright(c) 2013, Heresh Fattahi #
# Author: Heresh Fattahi #
############################################################
import sys
import os
import getopt
import time
import datetime
from numpy import *
from scipy.io import loadmat
import matplotlib
import matplotlib.pyplot as plt
from pylab import *
import h5py
from mpl_toolkits.axes_grid.inset_locator import inset_axes
def Usage():
print '''
***************************************************************
***************************************************************
Time-series viewer
Usage:
tsviewer.py -f timeseriesFile.h5 -v velocityFile.h5 -l lower bound -h higher bound -s fontsize -m Marker Size -c marker color -w linewidth -u unit
-f : file of the timeseries
-v : velocity file (if not specified then the last time-series epoch is displayed)
  -l : lower bound of the displacement (default is minimum of the displacement)
  -h : higher bound of the displacement (default is max of the displacement)
  -s : size of font used for x and y labels (default = 22)
  -m : marker size (default = 16)
  -c : color of the markers (default = green). some options are: orange, black, yellow, blue, red...
  -w : width of lines to connect the points (default = 2). Set to 0 (-w 0) if you don't want any line connecting the points
  -u : unit of the displacement (default = cm). Other options are: mm and m
  -e : event dates
  -a : lower bound of the colorscale used to display the velocity
  -b : higher bound of the colorscale used to display the velocity
-F : another timeseries file (can be used to compare 2 time-series)
-S : save to matlab [default: no]
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Example:
tsviewer.py timeseries.h5
tsviewer.py -f timeseries.h5
tsviewer.py -f timeseries_demCor.h5 -v velocity_masked.h5 -u m -c blue
tsviewer.py -f timeseries.h5 -v velocity.h5 -s 24 -m 12 -c orange -l -10 -h 10 -w 4 -u mm
tsviewer.py -f timeseries.h5 -F timeseries_tropCor.h5
tsviewer.py -f timeseries.h5 -v velocity.h5 -a -0.01 -b 0.01
tsviewer.py -f timeseries.h5 -S yes
***************************************************************
***************************************************************
'''
def main(argv):
markerSize=16
markerSize2=16
markerColor='g'
markerColor2='red'
lineWidth=2
fontSize=22
unit='cm'
Save_timeseries='no'
if len(sys.argv)>2:
try:
opts, args = getopt.getopt(argv,"f:F:v:a:b:s:m:c:w:u:l:h:S:")
except getopt.GetoptError:
Usage() ; sys.exit(1)
for opt,arg in opts:
if opt == '-f':
timeSeriesFile = arg
elif opt == '-F':
timeSeriesFile_2 = arg
elif opt == '-v':
velocityFile = arg
elif opt == '-a':
vmin = float(arg)
elif opt == '-b':
vmax = float(arg)
elif opt == '-s':
fontSize = int(arg)
elif opt == '-m':
markerSize=int(arg)
markerSize2=int(arg)
elif opt == '-S':
Save_timeseries=arg
elif opt == '-c':
markerColor=arg
elif opt == '-w':
lineWidth=int(arg)
elif opt == '-u':
unit=arg
elif opt == '-l':
lbound=float(arg)
elif opt == '-h':
hbound=float(arg)
elif len(sys.argv)==2:
if argv[0]=='-h':
Usage(); sys.exit(1)
elif os.path.isfile(argv[0]):
timeSeriesFile = argv[0]
h5timeseries = h5py.File(timeSeriesFile)
if not 'timeseries' in h5timeseries.keys():
print 'ERROR'
Usage(); sys.exit(1)
else:
Usage(); sys.exit(1)
elif len(sys.argv)<2:
Usage(); sys.exit(1)
if unit in ('m','M'):
unitFac=1
elif unit in ('cm','Cm','CM'):
unitFac=100
elif unit in ('mm','Mm','MM','mM'):
unitFac=1000
else:
print 'Warning:'
print 'wrong unit input!'
print 'cm is considered to display the displacement'
############################################
if not os.path.isfile(timeSeriesFile):
Usage();sys.exit(1)
h5timeseries = h5py.File(timeSeriesFile)
if not 'timeseries' in h5timeseries.keys():
Usage(); sys.exit(1)
dateList = h5timeseries['timeseries'].keys()
dateIndex={}
for ni in range(len(dateList)):
dateIndex[dateList[ni]]=ni
tbase=[]
d1 = datetime.datetime(*time.strptime(dateList[0],"%Y%m%d")[0:5])
for ni in range(len(dateList)):
d2 = datetime.datetime(*time.strptime(dateList[ni],"%Y%m%d")[0:5])
diff = d2-d1
tbase.append(diff.days)
dates=[]
for ni in range(len(dateList)):
d = datetime.datetime(*time.strptime(dateList[ni],"%Y%m%d")[0:5])
dates.append(d)
datevector=[]
for i in range(len(dates)):
datevector.append(np.float(dates[i].year) + np.float(dates[i].month-1)/12 + np.float(dates[i].day-1)/365)
datevector2=[round(i,2) for i in datevector]
###########################################
# eventDates=['20041223','20051003']
# try:
# eventDates
# events=[]
# for ni in range(len(eventDates)):
# d = datetime.datetime(*time.strptime(eventDates[ni],"%Y%m%d")[0:5])
# events.append(d)
# except:
# print ''
#print events
###########################################
try:
velocityFile
h5file=h5py.File(velocityFile,'r')
k=h5file.keys()
dset= h5file[k[0]].get(k[0])
print 'The file to display is: ' + k[0]
except:
dset = h5timeseries['timeseries'].get(h5timeseries['timeseries'].keys()[-1])
# timeseries = np.zeros((len(h5timeseries['timeseries'].keys()),np.shape(dset)[0],np.shape(dset)[1]),np.float32)
# for date in h5timeseries['timeseries'].keys():
# timeseries[dateIndex[date]] = h5timeseries['timeseries'].get(date)
###########################################
fig = plt.figure()
ax=fig.add_subplot(111)
try:
vmin
vmax
img=ax.imshow(dset,vmin=vmin,vmax=vmax)
except:
img=ax.imshow(dset)
fig2 = plt.figure(2)
ax2=fig2.add_subplot(111)
# print dates
# print dateList
try:
timeSeriesFile_2
h5timeseries_2=h5py.File(timeSeriesFile_2)
except:
print""
##########################################
def onclick(event):
if event.button==1:
print 'click'
xClick = int(event.xdata)
yClick = int(event.ydata)
Dis=[]
for date in h5timeseries['timeseries'].keys():
Dis.append( h5timeseries['timeseries'].get(date)[yClick][xClick])
ax2.cla()
try:
Dis2=[]
for date in dateList:
Dis2.append( h5timeseries_2['timeseries'].get(date)[yClick][xClick])
dis2=array(Dis2)
dis2=dis2*unitFac
ax2.plot(dates,dis2, '^',ms=markerSize2, alpha=0.7, mfc=markerColor2)
except:
Dis2=[]
# ax2.plot(dates,dis, '-ko',ms=markerSize, lw=lineWidth, alpha=0.7, mfc=markerColor)
dis=array(Dis)
if Save_timeseries in ['yes','y','YES','Yes']:
import scipy.io as sio
Delay={}
Delay['displacement']=dis
Delay['unit']='m'
Delay['time']=datevector
sio.savemat('displacement.mat', {'displacement': Delay})
dis=dis*unitFac
ax2.plot(dates,dis, '-ko',ms=markerSize, lw=lineWidth, alpha=0.7, mfc=markerColor)
# print dis
# print dates
print dset[yClick][xClick]
ax2.fmt_xdata = DateFormatter('%Y-%m-%d %H:%M:%S')
if unitFac==100:
ax2.set_ylabel('Displacement [cm]',fontsize=fontSize)
elif unitFac==1000:
ax2.set_ylabel('Displacement [mm]',fontsize=fontSize)
else:
ax2.set_ylabel('Displacement [m]',fontsize=fontSize)
ax2.set_xlabel('Time [years]',fontsize=fontSize)
ds=datevector[0]-0.2
de=datevector[-1]+0.2
ys=int(ds)
ye=int(de)
ms=int((ds-ys)*12)+1
me=int((de-ye)*12)+1
dss=datetime.datetime(ys,ms,1,0,0)
dee=datetime.datetime(ye,me,1,0,0)
ax2.set_xlim(dss,dee)
try:
lbound
hbound
ax2.set_ylim(lbound,hbound)
except:
ax2.set_ylim(min(dis)-0.4*abs(min(dis)),max(dis)+0.4*max(dis))
for tick in ax2.xaxis.get_major_ticks():
tick.label.set_fontsize(fontSize)
for tick in ax2.yaxis.get_major_ticks():
tick.label.set_fontsize(fontSize)
# specify integer or one of preset strings, e.g.
#tick.label.set_fontsize('x-small')
# tick.label.set_rotation('vertical')
fig2.autofmt_xdate()
plt.show()
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
if __name__ == '__main__':
main(sys.argv[1:])
| mit |
METASPACE2020/sm-engine | sm/engine/tests/msm_basic/test_formula_img_validator.py | 2 | 4236 | from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from unittest.mock import patch, MagicMock
from numpy.testing import assert_array_almost_equal
from pandas.util.testing import assert_frame_equal
from scipy.sparse import csr_matrix
from sm.engine.dataset_manager import DatasetManager, Dataset
from sm.engine import DatasetReader
from sm.engine.fdr import FDR
from sm.engine.mol_db import MolecularDB
from sm.engine.msm_basic.formula_img_validator import ImgMetrics
from sm.engine.msm_basic.formula_img_validator import sf_image_metrics, get_compute_img_metrics
from sm.engine.tests.util import pysparkling_context as spark_context, ds_config, sm_config
@patch('sm.engine.msm_basic.formula_img_validator.isotope_pattern_match', return_value=0.95)
@patch('sm.engine.msm_basic.formula_img_validator.isotope_image_correlation', return_value=0.8)
@patch('sm.engine.msm_basic.formula_img_validator.measure_of_chaos', return_value=0.99)
def test_get_compute_img_measures_pass(chaos_mock, image_corr_mock, pattern_match_mock):
img_gen_conf = {
'nlevels': 30,
'do_preprocessing': False,
'q': 99.0
}
empty_matrix = np.zeros((2, 3))
metrics = OrderedDict([('chaos', 0), ('spatial', 0), ('spectral', 0),
('total_iso_ints', [0, 0, 0, 0]),
('min_iso_ints', [0, 0, 0, 0]),
('max_iso_ints', [0, 0, 0, 0])])
compute_measures = get_compute_img_metrics(metrics, np.ones(2*3).astype(bool),
empty_matrix, img_gen_conf)
sf_iso_images = [csr_matrix([[0., 100., 100.], [10., 0., 3.]]),
csr_matrix([[0., 50., 50.], [0., 20., 0.]])]
sf_intensity = [100., 10., 1.]
measures = compute_measures(sf_iso_images, sf_intensity)
assert measures == (0.99, 0.8, 0.95, [213., 120., 0.], [0, 0, 0], [100., 50., 0.])
@pytest.fixture(scope='module')
def ds_formulas_images_mock():
ds_mock = Dataset('ds_id')
ds_mock.config = {'image_generation': {}}
ds_reader_mock = MagicMock(spec=DatasetReader)
ds_reader_mock.get_dims.return_value = (2, 3)
ds_reader_mock.get_sample_area_mask.return_value = np.ones(2*3).astype(bool)
sf_iso_images = [(0, [csr_matrix([[0, 100, 100], [10, 0, 3]]), csr_matrix([[0, 50, 50], [0, 20, 0]])]),
(1, [csr_matrix([[0, 100, 100], [10, 0, 3]]), csr_matrix([[0, 50, 50], [0, 20, 0]])])]
return ds_mock, ds_reader_mock, sf_iso_images
def test_sf_image_metrics(spark_context, ds_formulas_images_mock, ds_config):
with patch('sm.engine.msm_basic.formula_img_validator.get_compute_img_metrics') as mock:
mock.return_value = lambda *args: (0.9, 0.9, 0.9, [100., 10.], [0, 0], [10., 1.])
ds_mock, ds_reader_mock, ref_images = ds_formulas_images_mock
ref_images_rdd = spark_context.parallelize(ref_images)
metrics = OrderedDict([('chaos', 0), ('spatial', 0), ('spectral', 0),
('total_iso_ints', [0, 0, 0, 0]),
('min_iso_ints', [0, 0, 0, 0]),
('max_iso_ints', [0, 0, 0, 0])])
ion_centr_ints = {0: [100, 10, 1], 1: [100, 10, 1]}
metrics_df = sf_image_metrics(ref_images_rdd, metrics, ds_mock,
ds_reader_mock, ion_centr_ints, spark_context)
exp_metrics_df = (pd.DataFrame([[0, 0.9, 0.9, 0.9, [100., 10.], [0, 0], [10., 1.], 0.9**3],
[1, 0.9, 0.9, 0.9, [100., 10.], [0, 0], [10., 1.], 0.9**3]],
columns=['ion_i', 'chaos', 'spatial', 'spectral',
'total_iso_ints', 'min_iso_ints', 'max_iso_ints', 'msm'])
.set_index(['ion_i']))
assert_frame_equal(metrics_df, exp_metrics_df)
@pytest.mark.parametrize("nan_value", [None, np.NaN, np.NAN, np.inf])
def test_img_measures_replace_invalid_measure_values(nan_value):
invalid_img_measures = ImgMetrics(OrderedDict([('chaos', None), ('spatial', np.NAN), ('spectral', np.inf)]))
assert invalid_img_measures.to_tuple(replace_nan=True) == (0., 0., 0.)
| apache-2.0 |
tammoippen/nest-simulator | extras/ConnPlotter/tcd_nest.py | 15 | 6952 | # -*- coding: utf-8 -*-
#
# tcd_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
Interface routines to extract synapse information from NEST.
This file provides the interface to NEST required to plot effective
kernel connectivity as total charge deposited (TCD) as a function of
mean membrane potential.
In order to use TCD plots, you need to create an instance of class
SynapsesNEST. The constructor will import NEST to obtain all necessary
information. TCD can then be obtained by calling the generated object.
NB: At present, TCD is supported only for the ht_model. NMDA charge
deposition is based on steady-state value for open channels at given
voltage.
"""
# ----------------------------------------------------------------------------
import numpy as np
__all__ = ['TCD_NEST']
# ----------------------------------------------------------------------------
class TCD(object):
"""
Access total charge deposited (TCD) information for NEST neurons.
Create one instance of this class and call it to obtain charge
information.
NB: The constructor for this class imports NEST.
NB: Currently, only ht_model is supported, with synapse types
AMPA, NMDA, GABA_A, GABA_B.
"""
# ------------------------------------------------------------------------
def __init__(self, modelList):
"""
Create TCD computer for given modelList.
The constructor instantiates NEST, including a call to
ResetKernel() and instantiates all models in modelList.
From all models derived from ht_model, synapse information
is extracted and stored. Afterward, ResetKernel() is called
once more.
modelList: tuples of (parent, model, dict)
Note: nest must have been imported before and all necessary modules
loaded.
"""
import nest
nest.ResetKernel()
# keep "list" over all models derived from ht_neuron
ht_kids = set(["ht_neuron"])
for parent, model, props in modelList:
if parent in ht_kids and model not in ht_kids:
nest.CopyModel(parent, model, props)
ht_kids.add(model)
# ht_kids now contains all models derived from ht_neuron
        # We collect in _tcd_info a mapping from (target model, synapse type)
# to an object containing all required information for TCD computation.
self._tcd_info = {}
for mod in ht_kids:
props = nest.GetDefaults(mod)
for syn in ['AMPA', 'GABA_A', 'GABA_B']:
self._tcd_info[(mod, syn)] = self._TcdBeta(syn, props)
self._tcd_info[(mod, 'NMDA')] = self._TcdNMDA(props)
# delete models we created
nest.ResetKernel()
# ------------------------------------------------------------------------
def __call__(self, syn_type, target, V):
"""
        Return total charge deposited by a single spike arriving through
        a synapse of type syn_type onto target, given that
        target has membrane potential V.
Arguments:
syn_type synapse type (string: AMPA, NMDA, GABA_A, GABA_B)
target name of target neuron model (string)
V membrane potential (double)
Returns:
charge (double)
"""
return self._tcd_info[(target, syn_type)](V)
# ------------------------------------------------------------------------
class _TcdBeta(object):
"""
Class representing plain beta-function synapse model.
"""
def __init__(self, syn, props):
"""
syn is name of synapse type.
props is property dictionary of ht_neuron.
"""
td = props[syn + '_tau_2'] # decay time
tr = props[syn + '_tau_1'] # rise time
# integral over g(t)
self._int_g = (props[syn + '_g_peak'] * (td - tr) /
((tr / td) ** (tr / (td - tr)) -
(tr / td) ** (td / (td - tr))))
self._e_rev = props[syn + '_E_rev']
def __call__(self, V):
"""
V is membrane potential.
"""
return -self._int_g * (V - self._e_rev)
def __str__(self):
return "_int_g = %f, _e_rev = %f" % (self._int_g, self._e_rev)
# ------------------------------------------------------------------------
class _TcdNMDA(object):
"""
Class representing NMDA synapse model in ht_neuron.
Note: NMDA charge deposition is based on steady-state value
for open channels at given voltage.
"""
def __init__(self, props):
"""
props is property dictionary of ht_neuron.
"""
td = props['tau_decay_NMDA'] # decay time
tr = props['tau_rise_NMDA'] # rise time
# integral over g(t)
self._int_g = (props['g_peak_NMDA'] * (td - tr) /
((tr / td) ** (tr / (td - tr)) -
(tr / td) ** (td / (td - tr))))
self._e_rev = props['E_rev_NMDA']
self._v_act = props['V_act_NMDA']
self._s_act = props['S_act_NMDA']
def __call__(self, V):
"""
V is membrane potential.
"""
return (-self._int_g * (V - self._e_rev) /
(1. + np.exp((self._v_act - V) / self._s_act)))
def __str__(self):
return "_int_g = %f, _e_rev = %f, _v_act = %f, _s_act = %f" \
% (self._int_g, self._e_rev, self._v_act, self._s_act)
# ----------------------------------------------------------------------------
if __name__ == '__main__':
import matplotlib.pyplot as plt
import sys
sys.path.append('/Users/plesser/Projects/hill-model/scripts')
import ht_def_new_sq
import ht_params
htl, htc, htm = ht_def_new_sq.hill_tononi(ht_params.Params)
tcd = TCD(htm)
v = np.linspace(-90, 0, 100)
syns = ['AMPA', 'NMDA', 'GABA_A', 'GABA_B']
for s in syns:
g = np.array([tcd(s, 'Relay', vm) for vm in v])
plt.plot(v, g)
plt.legend(syns)
plt.show()
| gpl-2.0 |
fengzhyuan/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative definition above
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
vortex-ape/scikit-learn | examples/linear_model/plot_ard.py | 43 | 3912 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
We also plot predictions and uncertainties for ARD
for one dimensional regression using polynomial feature expansion.
Note the uncertainty starts going up on the right side of the plot.
This is because these test samples are outside of the range of the training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
# #############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
# #############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
# #############################################################################
# Plot the true weights, the estimated weights, the histogram of the
# weights, and predictions with standard deviations
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], np.full(len(relevant_features), 5.),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
# Plotting some predictions for polynomial regression
def f(x, noise_amount):
y = np.sqrt(x) * np.sin(x)
noise = np.random.normal(0, 1, len(x))
return y + noise_amount * noise
degree = 10
X = np.linspace(0, 10, 100)
y = f(X, noise_amount=1)
clf_poly = ARDRegression(threshold_lambda=1e5)
clf_poly.fit(np.vander(X, degree), y)
X_plot = np.linspace(0, 11, 25)
y_plot = f(X_plot, noise_amount=0)
y_mean, y_std = clf_poly.predict(np.vander(X_plot, degree), return_std=True)
plt.figure(figsize=(6, 5))
plt.errorbar(X_plot, y_mean, y_std, color='navy',
label="Polynomial ARD", linewidth=2)
plt.plot(X_plot, y_plot, color='gold', linewidth=2,
label="Ground Truth")
plt.ylabel("Output y")
plt.xlabel("Feature X")
plt.legend(loc="lower left")
plt.show()
| bsd-3-clause |
Erotemic/hotspotter | hstpl/ensure_tpl.py | 1 | 9527 | from __future__ import division, print_function
import pip as pip_
import parse
from os.path import abspath, dirname
import subprocess
import sys
def pip(*args):
pip_.main(list(args))
def install(package):
    pip('install', package)
def upgrade(package):
    pip('install', '--upgrade', package)
core = [
'Pygments>=1.6',
'argparse>=1.2.1',
'openpyxl>=1.6.2', # reads excel xlsx files
'parse>=1.6.2',
'psutil>=1.0.1',
'pyglet>=1.1.4',
'pyparsing>=2.0.1',
'pyreadline>=2.0',
'python-dateutil>=1.5',
'pyzmq>=13.1.0', # distributed computing
'six>=1.3.0', # python 3 support
]
speed = [
'Cython>=0.19.1',
'pylru>=1.0.6',
'llvmpy>=0.9.1',
'numba>=0.3.2',
]
interface = [
'ipython>=1.1.0',
'matplotlib>=1.3.1',
'python-qt>=0.50',
]
science = [
#'PIL>=1.1.7',
    'Pillow',
'flann>=1.8.4',
'numpy>=1.7.1',
'opencv-python>=2.4.6',
'pandas>=0.12.0',
'scikit-image>=0.9.3',
'scikit-learn>=0.14a1',
'scipy>=0.12.0',
]
#File "<string>", line 3, in <module>
#File "C:\Python27\Lib\site-packages\matplotlib\backends\backend_webagg.py", line 19, in <module>
#raise RuntimeError("The WebAgg backend requires Tornado.")
devtools = [
    'setuptools>=2.0.1',
    'distribute>=',
    'pyinstaller>=2.1',
'line-profiler>=1.0b3',
'flake8>=2.1.0',
'pep8>=1.4.6',
'pyflakes>=0.7.3',
'pylint>=1.0.0',
'runsnakerun>=2.0.3',
'squaremap>=1.0.2',
]
windows = [
'winpexpect',
'WinSys-3.x',
]
other = [
'virtualenv', # on windnows virtualenvwrapper-powershell
'plop', # python low overhead profiler
'sympy',
    'supreme',  # super resolution
'pytz', # Timezones
'grizzled', # Utility library
'Wand', # ImageMagick
'astroid', # Syntax tree
'boost-python',
'colorama', # ansii colors
'mccabe', # plugin for flake8
'logilab-common', # low level functions
'nose', # unit tester
'networkx'
]
allpkgs = core + speed + interface + science + devtools
def run_process(args, silent=True):
PIPE = subprocess.PIPE
proc = subprocess.Popen(args, stdout=PIPE, stderr=PIPE)
if silent:
(out, err) = proc.communicate()
else:
out_list = []
for line in proc.stdout.readlines():
print(line)
sys.stdout.flush()
out_list.append(line)
out = '\n'.join(out_list)
(_, err) = proc.communicate()
    ret = proc.wait()
return out, err, ret
def pipshow(pkg):
out, err, ret = run_process('pip show ' + pkg)
props_list = out.split('\r\n')[1:-1]
props = dict([prop.split(': ') for prop in props_list])
return props
def pipversion(pkg):
return pipshow(pkg)['Version']
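# Illustrative only -- assumes the old `pip show` layout with '\r\n' line endings
# that pipshow() splits on; the key names come from that output, not from this script:
#     >>> pipshow('parse')
#     {'Name': 'parse', 'Version': '1.6.2', 'Location': '...', 'Requires': ''}
#     >>> pipversion('parse')
#     '1.6.2'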
def pipinfo(pkg):
out, err, ret = run_process('pip search ' + pkg)
line_list = out.split('\r\n')
pkginfolist = []
next_entry = []
for line in line_list:
if line.find(' ') != 0:
pkginfolist.append(''.join(next_entry))
next_entry = []
next_entry.append(line)
found = []
for pkginfo in pkginfolist:
if pkginfo.find(pkg + ' ') == 0:
def tryfmt1(pkginfo):
parsestr1 = '{} - {} INSTALLED: {} LATEST: {}'
name, desc, installed, latest = parse.parse(parsestr1, pkginfo)
return name, desc, installed, latest
def tryfmt2(pkginfo):
parsestr2 = '{} - {} INSTALLED: {} (latest)'
name, desc, installed = parse.parse(parsestr2, pkginfo)
latest = installed
return name, desc, installed, latest
def tryfmt3(pkginfo):
parsestr2 = '{} - {}'
name, desc = parse.parse(parsestr2, pkginfo)
installed, latest = ('None', 'None')
return name, desc, installed, latest
for tryfmt in [tryfmt1, tryfmt2, tryfmt3]:
try:
name, desc, installed, latest = tryfmt(pkginfo)
found.append(dict(pkg=name.strip(),
info=desc.strip(),
installed=installed.strip(),
latest=latest.strip()))
break
except TypeError:
pass
if len(found) == 0:
found = [dict(pkg=pkg, info='cannot find', installed=None, latest=None)]
return found
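# The three `pip search` result layouts that pipinfo() tries, in order (illustrative
# examples only; exact spacing varies between pip versions):
#     'pkg - description  INSTALLED: 1.0  LATEST: 1.2'   -> tryfmt1
#     'pkg - description  INSTALLED: 1.2 (latest)'       -> tryfmt2
#     'pkg - description'                                 -> tryfmt3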
def get_allpkg_info():
allpkg_info = []
for pkgstr in allpkgs:
pkg, version = pkgstr.split('>=')
info = pipinfo(pkg)
tup = (info[0]['pkg'], info[0]['installed'], info[0]['latest'])
print('pkg=%r installed=%r latest=%r' % (tup))
allpkg_info.append(info)
print_allpkg_info(allpkg_info)
return allpkg_info
def print_allpkg_info(allpkg_info):
for info in allpkg_info:
tup = (info[0]['pkg'], info[0]['installed'], info[0]['latest'])
buf = ' ' * max(12 - len(info[0]['pkg']), 0)
print(('%s ' + buf + 'installed=%r latest=%r') % tup)
#installed_list = pip.get_installed_distributions()
#install('runsnake')
PIP_DISABLE_ON_WINDOWS = [
'wxPython',
'PIL',
'Pygments',
'llvmpy',
'matplotlib',
'numba',
'numpy',
'python-qt',
    'pyzmq',
'scipy',
]
#Get outdated packages
def get_outdated_packages(allpkg_info, safe=True):
outdated = []
unavailable = []
for info in allpkg_info:
pkg = info[0]['pkg']
latest = info[0]['latest']
installed = info[0]['installed']
if sys.platform == 'win32' and safe and pkg in PIP_DISABLE_ON_WINDOWS:
unavailable.append(info)
elif installed is None or installed == 'None':
unavailable.append(info)
elif latest != installed:
outdated.append(info)
print('Pip does not seem to be managing: \n *' + '\n *'.join([info[0]['pkg'] for info in unavailable]))
print('Updates available for: \n *' + '\n *'.join([info[0]['pkg'] + ' current=' + info[0]['installed'] + ' latest=' + info[0]['latest']for info in outdated]))
return outdated, unavailable
def vd(path):
'view directory'
if sys.platform == 'win32':
return run_process('explorer ' + path)
def write_installer_script(cmd_list, scriptname='installer'):
if sys.platform != 'win32':
ext = '.sh'
cmd_list = ['sudo ' + cmd for cmd in cmd_list]
else:
ext = '.bat'
script_fpath = abspath(scriptname + ext)
with open(script_fpath, 'w') as file_:
file_.write('\n'.join(cmd_list))
vd(dirname(script_fpath))
def uninstall_windows_conflicts():
# Uninstall windows things safely
cmd_list = ['pip uninstall %s' % pkg for pkg in PIP_DISABLE_ON_WINDOWS]
write_installer_script(cmd_list, scriptname='pip_uninstall')
if __name__ == '__main__':
allpkg_info = get_allpkg_info()
outdated, unavailable = get_outdated_packages(allpkg_info, False)
cmd_list = ['pip install %s --upgrade' % info[0]['pkg'] for info in outdated]
write_installer_script(cmd_list, scriptname='pip_upgrade')
print('\n'.join(cmd_list))
# sparsehash
'''
sudo add-apt-repository "http://downloads.skewed.de/apt/precise precise universe"
sudo apt-key adv --keyserver http://pgp.skewed.de --recv-keys 98507F25
sudo apt-get update
deb http://downloads.skewed.de/apt/DISTRIBUTION DISTRIBUTION universe
deb-src http://downloads.skewed.de/apt/DISTRIBUTION DISTRIBUTION universe
cd ~/tmp
alias untargz='tar xvzf '
alias untarbz='tar xvjf '
alias untar='tar xvf '
wget https://code.google.com/p/sparsehash/downloads/sparsehash-2.0.2.tar.gz
untargz sparsehash-2.0.2.tar.gz
mv ~/tmp/sparsehash-2.0.2 ~/code/sparsehash-2.0.2
cd ~/code/sparsehash-2.0.2
make -j9 && sudo make install
'''
# Gephi
'''
update-alternatives --config java
update-alternatives --config javac
sudo apt-add-repository ppa:rockclimb/gephi-daily
sudo apt-get install gephi
'''
# Py-graph-tool
#
'''
sudo apt-get install libboost-all-dev
sudo apt-get install libtool
sudo apt-get install libcgal-dev
sudo apt-get install libcgal-demo
sudo apt-get install libcairomm-1.0-dev
sudo apt-get install python-cairo-dev
sudo aptitude install python-cairo
sudo aptitude install python-gi-cairo
sudo apt-get install libcairo2
sudo apt-get install libcairo2-dev
sudo apt-get install python-dev
sudo apt-get install python-gobject
sudo apt-get install python-gobject-dev
# WINDOWS GCAL: http://www.cgal.org/download.html
cd ~/code
git clone git://git.skewed.de/graph-tool
cd graph-tool
git checkout -b release-2.2.9 release-2.2.9
rm *; git checkout *
libtoolize --force
./autogen.sh
./configure
make -j9 && sudo make install
#rm *; git checkout *
#libtoolize --force
#aclocal ; autoheader; automake ; autoconf
#./configure
#make -j9
#autoreconf -f -i -Wall,no-obsolete
######
#autoreconf -vif
#####
#rm *; git checkout *
#libtoolize --force
#aclocal
#autoheader
#automake --force-missing --add-missing
#autoconf
#autoreconf -f -i -Wall,no-obsolete
'''
# UBUNTU:
# apt-get install python-graph-tool
# ARCH:
#yaourt -S python2-graph-tool
#
# MACPORTS:
#port install py-graph-tool
# PYTHON-Graph
# sudo pip install python-graph-core
# sudo pip install python-graph-dot
# Python-igraph
#sudo pip install python-igraph
'''
sudo apt-get install gcal
sudo apt-get install libsparsehash-dev
sudo apt-get install sparsehash
sudo apt-get install python-gi-cairo
sudo apt-get install graphviz
'''
| apache-2.0 |
peterfpeterson/mantid | qt/python/mantidqt/utils/testing/mocks/mock_plotlib.py | 3 | 1860 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
"""
The aim of this module is to provide a swap-in object for
the matplotlib.pyplot module, which has the same interface.
Every function is instantiated as a mock, which allows to record
calls to the plotting functions, without executing any real
matplotlib code that requires a running GUI application.
The mocking follows the real matplotlib structure so that it can be easily
swapped in when necessary. There are two ways to do that - either
by injecting the plotting dependency in the constructor or
by using mock.patch to replace the 'matplotlib.pyplot'.
Note that not all matplotlib.pyplot functions have been added,
only the ones that have been necessary so far. If another function
needs to be mocked it can be freely added in the relevant class below
and it should not break any existing tests.
"""
from unittest.mock import Mock
class MockAx:
def __init__(self):
self.plot = Mock()
self.scatter = Mock()
self.errorbar = Mock()
self.legend = Mock()
self.set_xlabel = Mock()
self.set_ylabel = Mock()
class MockCanvas:
def __init__(self):
self.set_window_title = Mock()
class MockFig:
def __init__(self):
self.show = Mock()
self.mock_canvas = MockCanvas()
self.canvas = Mock(return_value=self.mock_canvas)
class MockPlotLib:
def __init__(self):
self.mock_ax = MockAx()
self.mock_fig = MockFig()
self.subplots = Mock(return_value=[self.mock_fig, self.mock_ax])
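# --- Usage sketch (not part of the original mantid module) -----------------
# A minimal example, assuming a hypothetical ExamplePlotter class that takes a
# pyplot-like module through its constructor; MockPlotLib is injected in its
# place so a test can assert on the plotting calls without a real GUI backend.
class ExamplePlotter:
    """Hypothetical consumer of a pyplot-like module (illustrative only)."""
    def __init__(self, pyplot):
        self.pyplot = pyplot
    def draw(self, xs, ys):
        fig, ax = self.pyplot.subplots()
        ax.plot(xs, ys)
        return fig
def _example_test_draw_records_plot_call():
    # Not collected by any real suite; it only shows the intended assertion style.
    mock_plt = MockPlotLib()
    plotter = ExamplePlotter(mock_plt)
    plotter.draw([1, 2, 3], [4, 5, 6])
    mock_plt.subplots.assert_called_once_with()
    mock_plt.mock_ax.plot.assert_called_once_with([1, 2, 3], [4, 5, 6])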
| gpl-3.0 |
marcoitur/FreeCAD | src/Mod/Plot/InitGui.py | 18 | 2920 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
class PlotWorkbench(Workbench):
"""Workbench of Plot module."""
from plotUtils import Paths
import PlotGui
Icon = 'Icon.svg'
MenuText = "Plot"
    ToolTip = ("The Plot module is used to edit/save output plots produced "
               "by other tools")
def Initialize(self):
from PySide import QtCore, QtGui
cmdlst = ["Plot_SaveFig",
"Plot_Axes",
"Plot_Series",
"Plot_Grid",
"Plot_Legend",
"Plot_Labels",
"Plot_Positions"]
self.appendToolbar(str(QtCore.QT_TRANSLATE_NOOP(
"Plot",
"Plot edition tools")), cmdlst)
self.appendMenu(str(QtCore.QT_TRANSLATE_NOOP(
"Plot",
"Plot")), cmdlst)
try:
import matplotlib
except ImportError:
from PySide import QtCore, QtGui
msg = QtGui.QApplication.translate(
"plot_console",
"matplotlib not found, Plot module will be disabled",
None,
QtGui.QApplication.UnicodeUTF8)
FreeCAD.Console.PrintMessage(msg + '\n')
Gui.addWorkbench(PlotWorkbench())
| lgpl-2.1 |
davidparks21/qso_lya_detection_pipeline | lucid_work/plot_vis.py | 1 | 1975 | # This file plots the saved visualizations
# Visulaoizations were saved as '.npy' files, this file
# plots and saves as '.png' files
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def save_vis(layer, vis, channel_n):
"""
    This function takes an array of pixel values from an individual visualization,
    plots it, and saves the image to a png file
:param layer: layer we are saving
:type layer: string
:param vis: array of pixel values for an individual visualization to be plotted
:type vis: array
:param channel_n: channel of the visualization we are saving
:type channel_n: int
:return: nothing
"""
    fig = plt.figure(frameon=False)
    ax = plt.Axes(fig, [0, 0, 1, 1])
    ax.set_axis_off()
    fig.add_axes(ax)
    ax.plot(vis, 'black')
    ax.set(xlim=(0, 400))
    file_save = 'data/neuron_vis/' + layer + '/' + layer + '_' + str(channel_n) + '.png'
    fig.savefig(file_save)
    plt.close(fig)
def create_layer_vis(filein, layer):
"""
    This function loads an array of visualization pixel values and calls save_vis()
    to plot each visualization separately.
:param filein: path of the file that has layer visualization data
:type filein: string
:param layer: the layer we are saving visualizations for
:type layer: string
:return: nothing
"""
imgs = np.load(filein)
for i in range(len(imgs)):
save_vis(layer, imgs[i], i)
def main():
create_layer_vis('data/conv1.npy', 'conv1')
create_layer_vis('data/conv1_relu.npy', 'conv1_relu')
create_layer_vis('data/pool1.npy', 'pool1')
create_layer_vis('data/conv2.npy', 'conv2')
create_layer_vis('data/conv2_relu.npy', 'conv2_relu')
create_layer_vis('data/pool2.npy', 'pool2')
create_layer_vis('data/conv3.npy', 'conv3')
create_layer_vis('data/conv3_relu.npy', 'conv3_relu')
create_layer_vis('data/pool3.npy', 'pool3')
if __name__ == "__main__":
main()
| mit |
juliantaylor/scipy | doc/source/conf.py | 6 | 10840 | # -*- coding: utf-8 -*-
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.1":
raise RuntimeError("Sphinx 1.1 or newer required")
needs_sphinx = '1.1'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.autosummary']
# Determine if matplotlib has a recent enough version of the
# plot_directive.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
raise RuntimeError("You need a recent enough version of matplotlib")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SciPy'
copyright = '2008-2009, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
import scipy
version = re.sub(r'\.dev-.*$', r'.dev', scipy.__version__)
release = scipy.__version__
print "Scipy (VERSION %s)" % (version,)
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if os.path.isdir(themedir):
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("http://scipy.org/", "Scipy.org"),
("http://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': 'indexsidebar.html'}
else:
# Build without scipy.org sphinx theme present
if 'scipyorg' in tags:
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init & update")
else:
html_style = 'scipy_fallback.css'
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': 'indexsidebar.html'}
html_title = "%s v%s Reference Guide" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_additional_pages = {}
html_use_modindex = True
html_copy_source = False
html_file_suffix = '.html'
htmlhelp_basename = 'scipy'
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the SciPy community'
latex_documents = [
('index', 'scipy-ref.tex', 'SciPy Reference Guide', _stdauthor, 'manual'),
# ('user/index', 'scipy-user.tex', 'SciPy User Guide',
# _stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters etc. sections, align uniformly, and adjust label emphasis
\usepackage{expdlist}
\let\latexdescription=\description
\let\endlatexdescription=\enddescription
\renewenvironment{description}%
{\begin{latexdescription}[\setleftmargin{60pt}\breaklabel\setlabelstyle{\bfseries\itshape}]}%
{\end{latexdescription}}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\normalfont\bfseries\itshape}%
{\py@NormalColor}{0em}{\py@NormalColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Save vertical space in parameter lists and elsewhere
\makeatletter
\renewenvironment{quote}%
{\list{}{\topsep=0pt%
\parsep \z@ \@plus\p@}%
\item\relax}%
{\endlist}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'http://docs.python.org/dev': None,
'http://docs.scipy.org/doc/numpy': None,
}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
if sphinx.__version__ >= "0.7":
import glob
autosummary_generate = glob.glob("*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
import scipy as sp
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96), 'pdf']
plot_html_show_formats = False
import math
phi = (math.sqrt(5) + 1)/2
font_size = 13*72/96.0 # 13 px
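# (The 13 px above are CSS pixels at 96 dpi, converted to points at 72 per
# inch: 13 * 72 / 96 = 9.75 pt.)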
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
if not use_matplotlib_plot_directive:
import matplotlib
matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print "NOTE: linkcode extension not found -- no links to source generated"
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(scipy.__file__))
if 'dev' in scipy.__version__:
return "http://github.com/scipy/scipy/blob/master/scipy/%s%s" % (
fn, linespec)
else:
return "http://github.com/scipy/scipy/blob/v%s/scipy/%s%s" % (
scipy.__version__, fn, linespec)
| bsd-3-clause |
Nyker510/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mask the labels of the points in the unlabeled set (-1 means unlabeled)
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
fbagirov/scikit-learn | sklearn/cluster/mean_shift_.py | 106 | 14056 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For large
    datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
        Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None, got %f" %
bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iter
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
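# --- Usage sketch (illustrative only, not part of scikit-learn) ------------
# A minimal example of the functional interface on a made-up 2-D dataset; the
# data and the quantile value are assumptions chosen purely for illustration.
# Wrapped in a private function so that importing the module stays
# side-effect free.
def _mean_shift_usage_sketch():
    X_demo = np.array([[1.0, 1.0], [1.2, 0.8], [0.9, 1.1],
                       [8.0, 8.0], [8.2, 7.9], [7.8, 8.1]])
    bandwidth = estimate_bandwidth(X_demo, quantile=0.5)
    cluster_centers, labels = mean_shift(X_demo, bandwidth=bandwidth)
    # Two well-separated blobs, so we expect two centers and labels in {0, 1}.
    return cluster_centers, labels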
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f, using data"
" points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
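# --- Estimator usage sketch (illustrative only, not part of scikit-learn) --
# The same kind of toy data run through the estimator API; bin_seeding=True
# speeds up seeding by binning points onto a bandwidth-sized grid.  The
# bandwidth value is an assumption picked for this tiny example.
def _mean_shift_estimator_sketch():
    X_demo = np.array([[1.0, 1.0], [1.2, 0.8], [0.9, 1.1],
                       [8.0, 8.0], [8.2, 7.9], [7.8, 8.1]])
    ms = MeanShift(bandwidth=0.5, bin_seeding=True).fit(X_demo)
    # cluster_centers_ holds one row per discovered blob; predict() maps new
    # points to their nearest center.
    return ms.labels_, ms.predict(np.array([[1.1, 0.9]]))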
| bsd-3-clause |
LennonLab/Micro-Encounter | tools/condense-data.py | 1 | 2762 | from __future__ import division
import linecache
from math import isnan
import numpy as np
from numpy import mean
import pandas as pd
import sys
import os
mydir = os.path.expanduser("~/GitHub/Micro-Encounter")
sys.path.append(mydir + "/model/col_labels")
labels = linecache.getline(mydir + '/model/col_labels/condensed-data.txt', 1)
with open(mydir + '/results/simulated_data/SimData.csv', 'w+') as text_file:
text_file.write(labels)
df = pd.read_csv(mydir + '/results/simulated_data/2016_09_18_SimData.csv')
#df = df.convert_objects(convert_numeric=True).dropna()
#df = df[df['sim'] > 500]
#-------------------------DATA TRANSFORMATIONS -----------------------
df2 = pd.DataFrame({'Encounters' : np.log10(df['Encounters'].groupby(df['sim']).mean())})
df2 = df2[np.isfinite(df2['Encounters'])]
df2['SpatialComplexity'] = df['SpatialComplexity'].groupby(df['sim']).unique()
df2['SpatialComplexity'] = [df2['SpatialComplexity'][i][0] for i in df2['SpatialComplexity'].keys()]
df2['TrophicComplexity'] = df['TrophicComplexity'].groupby(df['sim']).unique()
df2['TrophicComplexity'] = [df2['TrophicComplexity'][i][0] for i in df2['TrophicComplexity'].keys()]
df2['ResourceComplexity'] = df['ResourceComplexity'].groupby(df['sim']).unique()
df2['ResourceComplexity'] = [df2['ResourceComplexity'][i][0] for i in df2['ResourceComplexity'].keys()]
df2['NumberDead'] = df['numDead'].groupby(df['sim']).mean()
df2['TotalAbundance'] = df['N'].groupby(df['sim']).mean()
df2['DormantN'] = df['DormantN'].groupby(df['sim']).mean()
df2['%Dormant'] = df2['DormantN']/df2['TotalAbundance']
df2['Productivity'] = df['PRODI'].groupby(df['sim']).mean()
df2['ActiveN'] = df2['TotalAbundance'] - df2['DormantN']
df2['TotalResources'] = df['R'].groupby(df['sim']).mean()
df2['ResourceInflow'] = df['ResInflow'].groupby(df['sim']).mean()
# TRAITS
df2['MeanCellQuota'] = df['MeanCellQuota'].groupby(df['sim']).mean()
df2['MaxGrowth'] = df['MaxGrowth'].groupby(df['sim']).mean()
df2['MaxMaint'] = df['MaxMaint'].groupby(df['sim']).mean()
df2['MaxDispersal'] = df['MaxDispersal'].groupby(df['sim']).mean()
df2['MaxRPF'] = df['MaxRPF'].groupby(df['sim']).mean()
df2['MaxMaintFactor'] = df['MaxMainFactor'].groupby(df['sim']).mean()
df2['SpeciesSpecificDispersal'] = df['SpeciesDisp'].groupby(df['sim']).mean()
df2['SpeciesSpecificMaintenance'] = df['SpeciesMaint'].groupby(df['sim']).mean()
df2['SpeciesSpecificGrowth'] = df['SpecificGrowth'].groupby(df['sim']).mean()
df2['PerCapitaGrowth'] = df['PerCapitaGrowth'].groupby(df['sim']).mean()
df2['PerCapitaMaint'] = df['PerCapitaMaint'].groupby(df['sim']).mean()
df2['PerCapitaDispersal'] = df['PerCapitaDisp'].groupby(df['sim']).mean()
path = mydir + '/results/simulated_data/SimData_condensed.csv'
df2.to_csv(path, sep=',')
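# --- Pattern sketch (illustrative only, not part of the original script) ---
# Every transformation above follows the same shape: group the raw per-step
# rows by simulation id and collapse each column to its per-simulation mean.
# A toy version of that pattern, with made-up column names; the function is
# never called, so the script's behaviour is unchanged.
def _toy_condense_example():
    raw = pd.DataFrame({'sim': [1, 1, 2, 2],
                        'N': [10, 20, 5, 15],
                        'Encounters': [2, 4, 1, 3]})
    condensed = pd.DataFrame({
        'Encounters': np.log10(raw['Encounters'].groupby(raw['sim']).mean()),
        'TotalAbundance': raw['N'].groupby(raw['sim']).mean()})
    return condensed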
| gpl-3.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/plotting/test_frame.py | 3 | 111089 | # coding: utf-8
""" Test cases for DataFrame.plot """
import pytest
import string
import warnings
from datetime import datetime, date
import pandas as pd
from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range,
bdate_range)
from pandas.core.dtypes.api import is_list_like
from pandas.compat import range, lrange, lmap, lzip, u, zip, PY3
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
from pandas.util.testing import slow
from pandas.core.config import set_option
import numpy as np
from numpy.random import rand, randn
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
_skip_if_no_scipy_gaussian_kde,
_ok_for_gaussian_kde)
tm._skip_module_if_no_mpl()
class TestDataFramePlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(
size=20)})
@slow
def test_plot(self):
df = self.tdf
_check_plot_works(df.plot, grid=False)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True, use_index=False)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
        # mpl >= 1.5.2 (or slightly below) throws AttributeError
with pytest.raises((TypeError, AttributeError)):
df.plot.line(blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot, subplots=True, title='blah')
# We have to redo it here because _check_plot_works does two plots,
# once without an ax kwarg and once with an ax kwarg and the new sharex
# behaviour does not remove the visibility of the latter axis (as ax is
# present). see: https://github.com/pandas-dev/pandas/issues/9737
axes = df.plot(subplots=True, title='blah')
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
# axes[0].figure.savefig("test.png")
for ax in axes[:2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes[2]]:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
self._check_ticks_props(ax, xrot=0)
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
# GH 6951
# Test with single column
df = DataFrame({'x': np.random.rand(10)})
axes = _check_plot_works(df.plot.bar, subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1))
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.plot.bar(subplots=True, ax=ax)
assert len(axes) == 1
if self.mpl_ge_1_5_0:
result = ax.axes
else:
result = ax.get_axes() # deprecated
assert result is axes[0]
# GH 15516
def test_mpl2_color_cycle_str(self):
# test CN mpl 2.0 color cycle
if self.mpl_ge_2_0_0:
colors = ['C' + str(x) for x in range(10)]
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
for c in colors:
_check_plot_works(df.plot, color=c)
else:
pytest.skip("not supported in matplotlib < 2.0.0")
def test_color_empty_string(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(color='')
def test_color_and_style_arguments(self):
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
# if there is no color symbol in the style strings:
ax = df.plot(color=['red', 'black'], style=['-', '--'])
# check that the linestyles are correctly set:
linestyle = [line.get_linestyle() for line in ax.lines]
assert linestyle == ['-', '--']
# check that the colors are correctly set:
color = [line.get_color() for line in ax.lines]
assert color == ['red', 'black']
# passing both 'color' and 'style' arguments should not be allowed
# if there is a color symbol in the style strings:
with pytest.raises(ValueError):
df.plot(color=['red', 'black'], style=['k-', 'r--'])
def test_nonnumeric_exclude(self):
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
assert len(ax.get_lines()) == 1 # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self._check_text_labels(ax.xaxis.get_label(), 'a')
@slow
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(randn(2, 2), columns=['a', 'b'])
df.index.name = 'NAME'
df.plot(y='b', label='LABEL')
assert df.index.name == 'NAME'
@slow
def test_plot_xy(self):
# columns.inferred_type == 'string'
df = self.tdf
self._check_data(df.plot(x=0, y=1), df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'), df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1),
figsize=(16., 8.))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_logscales(self):
df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
ax = df.plot(logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = df.plot(logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = df.plot(loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
@slow
def test_xcompat(self):
import pandas as pd
df = self.tdf
ax = df.plot(x_compat=True)
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plotting.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plotting.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plotting.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.rand(21, 2),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=['a', 'b'])
df.plot()
self.plt.axhline(y=0)
tm.close()
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)}, index=np.arange(99, -1, -1),
dtype=np.int64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y, check_index_type=False)
tm.close()
df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y)
@slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
for kind in ['bar', 'barh', 'line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
assert axes.shape == (3, )
for ax, column in zip(axes, df.columns):
self._check_legend_labels(ax,
labels=[pprint_thing(column)])
for ax in axes[:-2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
axes = df.plot(kind=kind, subplots=True, sharex=False)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
assert ax.get_legend() is None
@slow
def test_subplots_timeseries(self):
idx = date_range(start='2014-07-01', freq='M', periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
for kind in ['line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:-2]:
# GH 7801
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45,
fontsize=7)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
self._check_ticks_props(ax, xlabelsize=7, xrot=45,
ylabelsize=7)
@slow
def test_subplots_layout(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
assert axes.shape == (4, 1)
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(1, 1))
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1, )
axes = df.plot(subplots=True, layout=(3, 3))
self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
assert axes.shape == (3, 3)
@slow
def test_subplots_warnings(self):
# GH 9464
warnings.simplefilter('error')
try:
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(np.random.randn(100, 4),
index=date_range('1/1/2000', periods=100))
df.plot(subplots=True, layout=(3, 2))
except Warning as w:
self.fail(w)
warnings.simplefilter('default')
@slow
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes[0], sharex=False,
sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3, )
assert returned[0].figure is fig
# draw on second row
returned = df.plot(subplots=True, ax=axes[1], sharex=False,
sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3, )
assert returned[0].figure is fig
self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
tm.close()
with pytest.raises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
df.plot(subplots=True, ax=axes)
        # pass 2-dim axes and invalid layout
        # an invalid layout should not affect the input and return value
        # (show warning is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
df = DataFrame(np.random.rand(10, 4),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes, layout=(2, 1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
returned = df.plot(subplots=True, ax=axes, layout=(2, -1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
returned = df.plot(subplots=True, ax=axes, layout=(-1, 2),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1, )
def test_subplots_ts_share_axes(self):
# GH 3964
fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
df = DataFrame(
np.random.randn(10, 9),
index=date_range(start='2014-07-01', freq='M', periods=10))
for i, ax in enumerate(axes.ravel()):
df[i].plot(ax=ax, fontsize=5)
# Rows other than bottom should not be visible
for ax in axes[0:-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=False)
# Bottom row should be visible
for ax in axes[-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=True)
# First column should be visible
for ax in axes[[0, 1, 2], [0]].ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
# Other columns should not be visible
for ax in axes[[0, 1, 2], [1]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes[[0, 1, 2], [2]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {'A': [1., 2., 3., 4.], 'B': [4., 3., 2., 1.], 'C': [5, 1, 3, 4]}
df = DataFrame(d, index=date_range('2014 10 11', '2014 10 14'))
axes = df[['A', 'B']].plot(subplots=True)
df['C'].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
@slow
def test_subplots_dup_columns(self):
# GH 10962
df = DataFrame(np.random.rand(5, 5), columns=list('aaaaa'))
axes = df.plot(subplots=True)
for ax in axes:
self._check_legend_labels(ax, labels=['a'])
assert len(ax.lines) == 1
tm.close()
axes = df.plot(subplots=True, secondary_y='a')
for ax in axes:
# (right) is only attached when subplots=False
self._check_legend_labels(ax, labels=['a'])
assert len(ax.lines) == 1
tm.close()
ax = df.plot(secondary_y='a')
self._check_legend_labels(ax, labels=['a (right)'] * 5)
assert len(ax.lines) == 0
assert len(ax.right_ax.lines) == 5
def test_negative_log(self):
df = - DataFrame(rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
with pytest.raises(ValueError):
df.plot.area(logy=True)
with pytest.raises(ValueError):
df.plot.area(loglog=True)
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
            base += nl.get_data()[1]  # get y coordinates
sy = sl.get_data()[1]
tm.assert_numpy_array_equal(base, sy)
def test_line_area_stacked(self):
with tm.RNGContext(42):
df = DataFrame(rand(6, 4), columns=['w', 'x', 'y', 'z'])
neg_df = -df
# each column has either positive or negative value
sep_df = DataFrame({'w': rand(6),
'x': rand(6),
'y': -rand(6),
'z': -rand(6)})
# each column has positive-negative mixed value
mixed_df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['w', 'x', 'y', 'z'])
for kind in ['line', 'area']:
ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
_check_plot_works(mixed_df.plot, stacked=False)
with pytest.raises(ValueError):
mixed_df.plot(stacked=True)
_check_plot_works(df.plot, kind=kind, logx=True, stacked=True)
def test_line_area_nan_df(self):
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
df = DataFrame({'a': values1, 'b': values2})
tdf = DataFrame({'a': values1,
'b': values2}, index=tm.makeDateIndex(k=4))
for d in [df, tdf]:
ax = _check_plot_works(d.plot)
masked1 = ax.lines[0].get_ydata()
masked2 = ax.lines[1].get_ydata()
# remove nan for comparison purpose
exp = np.array([1, 2, 3], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)
exp = np.array([3, 2, 1], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)
tm.assert_numpy_array_equal(
masked1.mask, np.array([False, False, True, False]))
tm.assert_numpy_array_equal(
masked2.mask, np.array([False, True, False, False]))
expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
expected2 = np.array([3, 0, 2, 1], dtype=np.float64)
ax = _check_plot_works(d.plot, stacked=True)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
expected1 + expected2)
ax = _check_plot_works(d.plot.area)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
expected1 + expected2)
ax = _check_plot_works(d.plot.area, stacked=False)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
df = DataFrame(rand(6, 3), columns=['x', 'y', 'z'])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin == lines[0].get_data()[0][0]
assert xmax == lines[0].get_data()[0][-1]
ax = df.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin == lines[0].get_data()[0][0]
assert xmax == lines[0].get_data()[0][-1]
axes = df.plot(secondary_y=True, subplots=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes:
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin == lines[0].get_data()[0][0]
assert xmax == lines[0].get_data()[0][-1]
def test_area_lim(self):
df = DataFrame(rand(6, 4), columns=['x', 'y', 'z', 'four'])
neg_df = -df
for stacked in [True, False]:
ax = _check_plot_works(df.plot.area, stacked=stacked)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lines = ax.get_lines()
assert xmin == lines[0].get_data()[0][0]
assert xmax == lines[0].get_data()[0][-1]
assert ymin == 0
ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
ymin, ymax = ax.get_ylim()
assert ymax == 0
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
default_colors = self._maybe_unpack_cycler(plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.bar()
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.bar(color=custom_colors)
self._check_colors(ax.patches[::5], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.bar(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.bar(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.bar(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
tm.close()
ax = df.plot(kind='bar', color='green')
self._check_colors(ax.patches[::5], facecolors=['green'] * 5)
tm.close()
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot.bar(linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# stacked
ax = df.plot.bar(stacked=True, linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# subplots
axes = df.plot.bar(linewidth=2, subplots=True)
self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
for ax in axes:
for r in ax.patches:
assert r.get_linewidth() == 2
@slow
def test_bar_barwidth(self):
df = DataFrame(randn(5, 5))
width = 0.9
# regular
ax = df.plot.bar(width=width)
for r in ax.patches:
assert r.get_width() == width / len(df.columns)
# stacked
ax = df.plot.bar(stacked=True, width=width)
for r in ax.patches:
assert r.get_width() == width
# horizontal regular
ax = df.plot.barh(width=width)
for r in ax.patches:
assert r.get_height() == width / len(df.columns)
# horizontal stacked
ax = df.plot.barh(stacked=True, width=width)
for r in ax.patches:
assert r.get_height() == width
# subplots
axes = df.plot.bar(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_width() == width
# horizontal subplots
axes = df.plot.barh(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_height() == width
@slow
def test_bar_barwidth_position(self):
df = DataFrame(randn(5, 5))
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9,
position=0.2)
@slow
def test_bar_barwidth_position_int(self):
# GH 12979
df = DataFrame(randn(5, 5))
for w in [1, 1.]:
ax = df.plot.bar(stacked=True, width=w)
ticks = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
assert ax.get_xlim() == (-0.75, 4.75)
# check left-edge of bars
assert ax.patches[0].get_x() == -0.5
assert ax.patches[-1].get_x() == 3.5
self._check_bar_alignment(df, kind='bar', stacked=True, width=1)
self._check_bar_alignment(df, kind='barh', stacked=False, width=1)
self._check_bar_alignment(df, kind='barh', stacked=True, width=1)
self._check_bar_alignment(df, kind='bar', subplots=True, width=1)
self._check_bar_alignment(df, kind='barh', subplots=True, width=1)
@slow
def test_bar_bottom_left(self):
df = DataFrame(rand(5, 5))
ax = df.plot.bar(stacked=False, bottom=1)
result = [p.get_y() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
result = [p.get_y() for p in ax.patches[:5]]
assert result == [-1, -2, -3, -4, -5]
ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
result = [p.get_x() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
result = [p.get_x() for p in ax.patches[:5]]
assert result == [1, 2, 3, 4, 5]
axes = df.plot.bar(subplots=True, bottom=-1)
for ax in axes:
result = [p.get_y() for p in ax.patches]
assert result == [-1] * 5
axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
for ax in axes:
result = [p.get_x() for p in ax.patches]
assert result == [1] * 5
@slow
def test_bar_nan(self):
df = DataFrame({'A': [10, np.nan, 20],
'B': [5, 10, 20],
'C': [1, 2, 3]})
ax = df.plot.bar()
expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
result = [p.get_height() for p in ax.patches]
assert result == expected
ax = df.plot.bar(stacked=True)
result = [p.get_height() for p in ax.patches]
assert result == expected
result = [p.get_y() for p in ax.patches]
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
assert result == expected
@slow
def test_bar_categorical(self):
# GH 13019
df1 = pd.DataFrame(np.random.randn(6, 5),
index=pd.Index(list('ABCDEF')),
columns=pd.Index(list('abcde')))
# categorical index must behave the same
df2 = pd.DataFrame(np.random.randn(6, 5),
index=pd.CategoricalIndex(list('ABCDEF')),
columns=pd.CategoricalIndex(list('abcde')))
for df in [df1, df2]:
ax = df.plot.bar()
ticks = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
# check left-edge of bars
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 5.15
ax = df.plot.bar(stacked=True)
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 4.75
@slow
def test_plot_scatter(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot.scatter, x='x', y='y')
_check_plot_works(df.plot.scatter, x=1, y=2)
with pytest.raises(TypeError):
df.plot.scatter(x='x')
with pytest.raises(TypeError):
df.plot.scatter(y='y')
# GH 6951
axes = df.plot(x='x', y='y', kind='scatter', subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@slow
def test_plot_scatter_with_c(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
axes = [df.plot.scatter(x='x', y='y', c='z'),
df.plot.scatter(x=0, y=1, c=2)]
for ax in axes:
# default to Greys
assert ax.collections[0].cmap.name == 'Greys'
if self.mpl_ge_1_3_1:
# n.b. there appears to be no public method to get the colorbar
# label
assert ax.collections[0].colorbar._label == 'z'
cm = 'cubehelix'
ax = df.plot.scatter(x='x', y='y', c='z', colormap=cm)
assert ax.collections[0].cmap.name == cm
# verify turning off colorbar works
ax = df.plot.scatter(x='x', y='y', c='z', colorbar=False)
assert ax.collections[0].colorbar is None
# verify that we can still plot a solid color
ax = df.plot.scatter(x=0, y=1, c='red')
assert ax.collections[0].colorbar is None
self._check_colors(ax.collections, facecolors=['r'])
# Ensure that we can pass an np.array straight through to matplotlib;
# this functionality was accidentally removed previously.
# See https://github.com/pandas-dev/pandas/issues/8852 for the bug report
#
# Exercise colormap path and non-colormap path as they are independent
#
df = DataFrame({'A': [1, 2], 'B': [3, 4]})
red_rgba = [1.0, 0.0, 0.0, 1.0]
green_rgba = [0.0, 1.0, 0.0, 1.0]
rgba_array = np.array([red_rgba, green_rgba])
ax = df.plot.scatter(x='A', y='B', c=rgba_array)
# expect the face colors of the points in the non-colormap path to be
# identical to the values we supplied. Normally we'd be on shaky ground
# comparing floats for equality, but here we expect them to be
# identical.
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor(),
rgba_array)
# we don't test the colors of the faces in this next plot because they
# are dependent on the spring colormap, which may change its colors
# later.
float_array = np.array([0.0, 1.0])
df.plot.scatter(x='A', y='B', c=float_array, cmap='spring')
def test_scatter_colors(self):
df = DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3], 'c': [1, 2, 3]})
with pytest.raises(TypeError):
df.plot.scatter(x='a', y='b', c='c', color='green')
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
ax = df.plot.scatter(x='a', y='b', c='c')
tm.assert_numpy_array_equal(
ax.collections[0].get_facecolor()[0],
np.array(self.colorconverter.to_rgba(default_colors[0])))
ax = df.plot.scatter(x='a', y='b', color='white')
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0],
np.array([1, 1, 1, 1], dtype=np.float64))
@slow
def test_plot_bar(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot.bar)
_check_plot_works(df.plot.bar, legend=False)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot.bar, subplots=True)
_check_plot_works(df.plot.bar, stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot.bar)
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
ax = _check_plot_works(df.plot.bar)
self._check_ticks_props(ax, xrot=90)
ax = df.plot.bar(rot=35, fontsize=10)
self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
ax = _check_plot_works(df.plot.barh)
self._check_ticks_props(ax, yrot=0)
ax = df.plot.barh(rot=55, fontsize=11)
self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
def _check_bar_alignment(self, df, kind='bar', stacked=False,
subplots=False, align='center', width=0.5,
position=0.5):
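# Plot df with the given options and verify that the bar edges and
# centers line up with the axis limits and tick locations.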
axes = df.plot(kind=kind, stacked=stacked, subplots=subplots,
align=align, width=width, position=position, grid=True)
axes = self._flatten_visible(axes)
for ax in axes:
if kind == 'bar':
axis = ax.xaxis
ax_min, ax_max = ax.get_xlim()
min_edge = min([p.get_x() for p in ax.patches])
max_edge = max([p.get_x() + p.get_width() for p in ax.patches])
elif kind == 'barh':
axis = ax.yaxis
ax_min, ax_max = ax.get_ylim()
min_edge = min([p.get_y() for p in ax.patches])
max_edge = max([p.get_y() + p.get_height() for p in ax.patches])
else:
raise ValueError
# GH 7498
# compare margins between lim and bar edges
tm.assert_almost_equal(ax_min, min_edge - 0.25)
tm.assert_almost_equal(ax_max, max_edge + 0.25)
p = ax.patches[0]
if kind == 'bar' and (stacked is True or subplots is True):
edge = p.get_x()
center = edge + p.get_width() * position
elif kind == 'bar' and stacked is False:
center = p.get_x() + p.get_width() * len(df.columns) * position
edge = p.get_x()
elif kind == 'barh' and (stacked is True or subplots is True):
center = p.get_y() + p.get_height() * position
edge = p.get_y()
elif kind == 'barh' and stacked is False:
center = p.get_y() + p.get_height() * len(
df.columns) * position
edge = p.get_y()
else:
raise ValueError
# Check that the ticks are located at integer positions
assert (axis.get_ticklocs() == np.arange(len(df))).all()
if align == 'center':
# Check whether the bar is centered on the tick
tm.assert_almost_equal(axis.get_ticklocs()[0], center)
elif align == 'edge':
# Check whether the bar's edge starts from the tick
tm.assert_almost_equal(axis.get_ticklocs()[0], edge)
else:
raise ValueError
return axes
@slow
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9)
@slow
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9)
@slow
def test_bar_subplots_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9)
self._check_bar_alignment(df, kind='barh', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9)
@slow
def test_bar_align_single_column(self):
df = DataFrame(randn(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True)
@slow
def test_bar_edge(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9,
align='edge')
@slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([1., 10.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 100))
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
@slow
def test_bar_log_subplots(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
log=True, subplots=True)
tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@slow
def test_boxplot(self):
df = self.hist_df
series = df['height']
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
ax = _check_plot_works(df.plot.box)
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(),
np.arange(1, len(numeric_cols) + 1))
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
# different warning on py3
if not PY3:
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.box, subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, yaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_xticklabels(), [label])
assert len(ax.lines) == self.bp_n_objects
axes = series.plot.box(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = _check_plot_works(series.plot.box)
positions = np.array([1, 6, 7])
ax = df.plot.box(positions=positions)
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@slow
def test_boxplot_vertical(self):
df = self.hist_df
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
# if horizontal, yticklabels are rotated
ax = df.plot.box(rot=50, fontsize=8, vert=False)
self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
self._check_text_labels(ax.get_yticklabels(), labels)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.box,
subplots=True, vert=False, logx=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, xaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_yticklabels(), [label])
assert len(ax.lines) == self.bp_n_objects
positions = np.array([3, 2, 8])
ax = df.plot.box(positions=positions, vert=False)
self._check_text_labels(ax.get_yticklabels(), labels)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@slow
def test_boxplot_return_type(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with pytest.raises(ValueError):
df.plot.box(return_type='NOTATYPE')
result = df.plot.box(return_type='dict')
self._check_box_return_type(result, 'dict')
result = df.plot.box(return_type='axes')
self._check_box_return_type(result, 'axes')
result = df.plot.box() # default axes
self._check_box_return_type(result, 'axes')
result = df.plot.box(return_type='both')
self._check_box_return_type(result, 'both')
@slow
def test_boxplot_subplots_return_type(self):
df = self.hist_df
# normal style: return_type=None
result = df.plot.box(subplots=True)
assert isinstance(result, Series)
self._check_box_return_type(result, None, expected_keys=[
'height', 'weight', 'category'])
for t in ['dict', 'axes', 'both']:
returned = df.plot.box(return_type=t, subplots=True)
self._check_box_return_type(
returned, t,
expected_keys=['height', 'weight', 'category'],
check_ax_title=False)
@slow
def test_kde_df(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(randn(100, 4))
ax = _check_plot_works(df.plot, kind='kde')
expected = [pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
self._check_ticks_props(ax, xrot=0)
ax = df.plot(kind='kde', rot=20, fontsize=5)
self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot, kind='kde',
subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.plot(kind='kde', logy=True, subplots=True)
self._check_ax_scales(axes, yaxis='log')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
_check_plot_works(df.plot, kind='kde')
@slow
def test_hist_df(self):
from matplotlib.patches import Rectangle
if self.mpl_le_1_2_1:
pytest.skip("not supported in matplotlib <= 1.2.x")
df = DataFrame(randn(100, 4))
series = df[0]
ax = _check_plot_works(df.plot.hist)
expected = [pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.hist,
subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
self._check_ax_scales(axes, yaxis='log')
axes = series.plot.hist(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = series.plot.hist(normed=True, cumulative=True, bins=4)
# height of the last bin must be 1.0 (cumulative, normed histogram)
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-1].get_height(), 1.0)
tm.close()
ax = series.plot.hist(cumulative=True, bins=4)
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-2].get_height(), 100.0)
tm.close()
# if horizontal, yticklabels are rotated
axes = df.plot.hist(rot=50, fontsize=8, orientation='horizontal')
self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
def _check_box_coord(self, patches, expected_y=None, expected_h=None,
expected_x=None, expected_w=None):
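# Collect the y/height/x/width of every patch and compare each against
# the corresponding expected array when one is provided.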
result_y = np.array([p.get_y() for p in patches])
result_height = np.array([p.get_height() for p in patches])
result_x = np.array([p.get_x() for p in patches])
result_width = np.array([p.get_width() for p in patches])
# dtype depends on the values above, so there is no need to check it
if expected_y is not None:
tm.assert_numpy_array_equal(result_y, expected_y,
check_dtype=False)
if expected_h is not None:
tm.assert_numpy_array_equal(result_height, expected_h,
check_dtype=False)
if expected_x is not None:
tm.assert_numpy_array_equal(result_x, expected_x,
check_dtype=False)
if expected_w is not None:
tm.assert_numpy_array_equal(result_width, expected_w,
check_dtype=False)
@slow
def test_hist_df_coord(self):
normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([8, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([6, 7, 8, 9, 10]))},
columns=['A', 'B', 'C'])
nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]),
np.array([3, 10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]),
np.array([8, 3, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]),
np.array([6, 7, 8, 3, 9, 10]))},
columns=['A', 'B', 'C'])
for df in [normal_df, nan_df]:
ax = df.plot.hist(bins=5)
self._check_box_coord(ax.patches[:5],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True)
self._check_box_coord(ax.patches[:5],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_y=np.array([10, 9, 8, 7, 6]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_y=np.array([18, 17, 16, 15, 14]),
expected_h=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True)
self._check_box_coord(axes[0].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
if self.mpl_ge_1_3_1:
# horizontal
ax = df.plot.hist(bins=5, orientation='horizontal')
self._check_box_coord(ax.patches[:5],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True,
orientation='horizontal')
self._check_box_coord(ax.patches[:5],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_x=np.array([10, 9, 8, 7, 6]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(
ax.patches[10:],
expected_x=np.array([18, 17, 16, 15, 14]),
expected_w=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True,
orientation='horizontal')
self._check_box_coord(axes[0].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
@slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
@slow
def test_df_legend_labels(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f'])
df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i'])
df4 = DataFrame(rand(3, 3), columns=['j', 'k', 'l'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=True)
self._check_legend_labels(ax, labels=df.columns)
ax = df2.plot(kind=kind, legend=False, ax=ax)
self._check_legend_labels(ax, labels=df.columns)
ax = df3.plot(kind=kind, legend=True, ax=ax)
self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
ax = df4.plot(kind=kind, legend='reverse', ax=ax)
expected = list(df.columns.union(df3.columns)) + list(reversed(
df4.columns))
self._check_legend_labels(ax, labels=expected)
# Secondary Y
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(kind='bar', legend=True, secondary_y='h', ax=ax)
self._check_legend_labels(
ax, labels=['a', 'b (right)', 'c', 'g', 'h (right)', 'i'])
# Time Series
ind = date_range('1/1/2014', periods=3)
df = DataFrame(randn(3, 3), columns=['a', 'b', 'c'], index=ind)
df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f'], index=ind)
df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i'], index=ind)
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(legend=True, ax=ax)
self._check_legend_labels(
ax, labels=['a', 'b (right)', 'c', 'g', 'h', 'i'])
# scatter
ax = df.plot.scatter(x='a', y='b', label='data1')
self._check_legend_labels(ax, labels=['data1'])
ax = df2.plot.scatter(x='d', y='e', legend=False, label='data2', ax=ax)
self._check_legend_labels(ax, labels=['data1'])
ax = df3.plot.scatter(x='g', y='h', label='data3', ax=ax)
self._check_legend_labels(ax, labels=['data1', 'data3'])
# ensure label args pass through and that neither the index name nor
# the column names mutate
df5 = df.set_index('a')
ax = df5.plot(y='b')
self._check_legend_labels(ax, labels=['b'])
ax = df5.plot(y='b', label='LABEL_b')
self._check_legend_labels(ax, labels=['LABEL_b'])
self._check_text_labels(ax.xaxis.get_label(), 'a')
ax = df5.plot(y='c', label='LABEL_c', ax=ax)
self._check_legend_labels(ax, labels=['LABEL_b', 'LABEL_c'])
assert df5.columns.tolist() == ['b', 'c']
def test_legend_name(self):
multi = DataFrame(randn(4, 4),
columns=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
multi.columns.names = ['group', 'individual']
ax = multi.plot()
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df = DataFrame(randn(5, 5))
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df.columns.name = 'new'
ax = df.plot(legend=False, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'new')
@slow
def test_no_legend(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
@slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
df = DataFrame(randn(100, 3))
for markers in [{0: '^',
1: '+',
2: 'o'}, {0: '^',
1: '+'}, ['^', '+', 'o'], ['^', '+']]:
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
for i, l in enumerate(ax.get_lines()[:len(markers)]):
assert l.get_marker() == markers[i]
@slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
assert ax.get_legend() is None
ax = s.plot(legend=True)
assert ax.get_legend().get_texts()[0].get_text() == 'None'
@slow
@tm.capture_stdout
def test_line_colors(self):
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax2 = df.plot(colors=custom_colors)
lines2 = ax2.get_lines()
for l1, l2 in zip(ax.get_lines(), lines2):
assert l1.get_color() == l2.get_color()
tm.close()
ax = df.plot(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
ax = df.loc[:, [0]].plot(color='DodgerBlue')
self._check_colors(ax.lines, linecolors=['DodgerBlue'])
ax = df.plot(color='red')
self._check_colors(ax.get_lines(), linecolors=['red'] * 5)
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
with pytest.raises(ValueError):
# A color list containing shorthand hex values raises a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
# Forced show plot
_check_plot_works(df.plot, color=custom_colors)
@slow
def test_dont_modify_colors(self):
colors = ['r', 'g', 'b']
pd.DataFrame(np.random.rand(10, 2)).plot(color=colors)
assert len(colors) == 3
@slow
def test_line_colors_and_styles_subplots(self):
# GH 9894
from matplotlib import cm
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(subplots=True)
for ax, c in zip(axes, list(default_colors)):
if self.mpl_ge_2_0_0:
c = [c]
self._check_colors(ax.get_lines(), linecolors=c)
tm.close()
# single color char
axes = df.plot(subplots=True, color='k')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(subplots=True, color='green')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['green'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
axes = df.plot(color=list(custom_colors), subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
with pytest.raises(ValueError):
# A color list containing shorthand hex values raises a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
# Forced show plot
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot, color=custom_colors, subplots=True)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(color='DodgerBlue', subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@slow
def test_area_colors(self):
from matplotlib import cm
from matplotlib.collections import PolyCollection
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.area(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=custom_colors)
handles, labels = ax.get_legend_handles_labels()
if self.mpl_ge_1_5_0:
self._check_colors(handles, facecolors=custom_colors)
else:
# legend is stored as Line2D, thus check linecolors
linehandles = [x for x in handles
if not isinstance(x, PolyCollection)]
self._check_colors(linehandles, linecolors=custom_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
ax = df.plot.area(colormap='jet')
jet_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=jet_colors)
handles, labels = ax.get_legend_handles_labels()
if self.mpl_ge_1_5_0:
self._check_colors(handles, facecolors=jet_colors)
else:
linehandles = [x for x in handles
if not isinstance(x, PolyCollection)]
self._check_colors(linehandles, linecolors=jet_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
# When stacked=False, alpha is set to 0.5
ax = df.plot.area(colormap=cm.jet, stacked=False)
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
self._check_colors(poly, facecolors=jet_with_alpha)
handles, labels = ax.get_legend_handles_labels()
if self.mpl_ge_1_5_0:
linecolors = jet_with_alpha
else:
# Line2D can't have alpha in its linecolor
linecolors = jet_colors
self._check_colors(handles[:len(jet_colors)], linecolors=linecolors)
for h in handles:
assert h.get_alpha() == 0.5
@slow
def test_hist_colors(self):
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.hist()
self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.hist(color=custom_colors)
self._check_colors(ax.patches[::10], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.hist(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.hist(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.hist(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
ax = df.plot(kind='hist', color='green')
self._check_colors(ax.patches[::10], facecolors=['green'] * 5)
tm.close()
@slow
def test_kde_colors(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.kde(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax = df.plot.kde(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot.kde(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
@slow
def test_kde_colors_and_styles_subplots(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(kind='kde', subplots=True)
for ax, c in zip(axes, list(default_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# single color char
axes = df.plot(kind='kde', color='k', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(kind='kde', color='red', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['red'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(kind='kde', color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(kind='kde', colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(kind='kde', color='DodgerBlue',
subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(kind='kde', style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(kind='kde', style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@slow
def test_boxplot_colors(self):
def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k',
fliers_c=None):
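# Check the line colors of each boxplot artist group (boxes, whiskers,
# medians, fliers and caps) against the expected colors.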
# TODO: outside this func?
if fliers_c is None:
fliers_c = 'k' if self.mpl_ge_2_0_0 else 'b'
self._check_colors(bp['boxes'],
linecolors=[box_c] * len(bp['boxes']))
self._check_colors(bp['whiskers'],
linecolors=[whiskers_c] * len(bp['whiskers']))
self._check_colors(bp['medians'],
linecolors=[medians_c] * len(bp['medians']))
self._check_colors(bp['fliers'],
linecolors=[fliers_c] * len(bp['fliers']))
self._check_colors(bp['caps'],
linecolors=[caps_c] * len(bp['caps']))
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
bp = df.plot.box(return_type='dict')
_check_colors(bp, default_colors[0], default_colors[0],
default_colors[2])
tm.close()
dict_colors = dict(boxes='#572923', whiskers='#982042',
medians='#804823', caps='#123456')
bp = df.plot.box(color=dict_colors, sym='r+', return_type='dict')
_check_colors(bp, dict_colors['boxes'], dict_colors['whiskers'],
dict_colors['medians'], dict_colors['caps'], 'r')
tm.close()
# partial colors
dict_colors = dict(whiskers='c', medians='m')
bp = df.plot.box(color=dict_colors, return_type='dict')
_check_colors(bp, default_colors[0], 'c', 'm')
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
bp = df.plot.box(colormap='jet', return_type='dict')
jet_colors = lmap(cm.jet, np.linspace(0, 1, 3))
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# Test colormap functionality
bp = df.plot.box(colormap=cm.jet, return_type='dict')
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# string color is applied to all artists except fliers
bp = df.plot.box(color='DodgerBlue', return_type='dict')
_check_colors(bp, 'DodgerBlue', 'DodgerBlue', 'DodgerBlue',
'DodgerBlue')
# tuple is also applied to all artists except fliers
bp = df.plot.box(color=(0, 1, 0), sym='#123456', return_type='dict')
_check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0),
(0, 1, 0), '#123456')
with pytest.raises(ValueError):
# A color dict containing an invalid key raises a ValueError
df.plot.box(color=dict(boxes='red', xxxx='blue'))
def test_default_color_cycle(self):
import matplotlib.pyplot as plt
colors = list('rgbk')
if self.mpl_ge_1_5_0:
import cycler
plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors)
else:
plt.rcParams['axes.color_cycle'] = colors
df = DataFrame(randn(5, 3))
ax = df.plot()
expected = self._maybe_unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
def test_unordered_ts(self):
df = DataFrame(np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1),
date(2012, 9, 1),
date(2012, 8, 1)],
columns=['test'])
ax = df.plot()
xticks = ax.lines[0].get_xdata()
assert xticks[0] < xticks[1]
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
def test_kind_both_ways(self):
df = DataFrame({'x': [1, 2, 3]})
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
df.plot(kind=kind)
getattr(df.plot, kind)()
for kind in ['scatter', 'hexbin']:
df.plot('x', 'x', kind=kind)
getattr(df.plot, kind)('x', 'x')
def test_all_invalid_plot_data(self):
df = DataFrame(list('abcd'))
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
df.plot(kind=kind)
@slow
def test_partially_invalid_plot_data(self):
with tm.RNGContext(42):
df = DataFrame(randn(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
df.plot(kind=kind)
with tm.RNGContext(42):
# area plot doesn't support positive/negative mixed data
kinds = ['area']
df = DataFrame(rand(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in kinds:
with pytest.raises(TypeError):
df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(kind='aasdf')
@slow
def test_hexbin_basic(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', gridsize=10)
# TODO: need better way to test. This just does existence.
assert len(ax.collections) == 1
# GH 6951
axes = df.plot.hexbin(x='A', y='B', subplots=True)
# hexbin should have 2 axes in the figure: 1 for plotting and another
# for the colorbar
assert len(axes[0].figure.axes) == 2
# return value is single axes
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@slow
def test_hexbin_with_c(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', C='C')
assert len(ax.collections) == 1
ax = df.plot.hexbin(x='A', y='B', C='C', reduce_C_function=np.std)
assert len(ax.collections) == 1
@slow
def test_hexbin_cmap(self):
df = self.hexbin_df
# Default to BuGn
ax = df.plot.hexbin(x='A', y='B')
assert ax.collections[0].cmap.name == 'BuGn'
cm = 'cubehelix'
ax = df.plot.hexbin(x='A', y='B', colormap=cm)
assert ax.collections[0].cmap.name == cm
@slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', colorbar=None)
assert ax.collections[0].colorbar is None
@slow
def test_allow_cmap(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', cmap='YlGn')
assert ax.collections[0].cmap.name == 'YlGn'
with pytest.raises(TypeError):
df.plot.hexbin(x='A', y='B', cmap='YlGn', colormap='BuGn')
@slow
def test_pie_df(self):
df = DataFrame(np.random.rand(5, 3), columns=['X', 'Y', 'Z'],
index=['a', 'b', 'c', 'd', 'e'])
with pytest.raises(ValueError):
df.plot.pie()
ax = _check_plot_works(df.plot.pie, y='Y')
self._check_text_labels(ax.texts, df.index)
ax = _check_plot_works(df.plot.pie, y=2)
self._check_text_labels(ax.texts, df.index)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.pie,
subplots=True)
assert len(axes) == len(df.columns)
for ax in axes:
self._check_text_labels(ax.texts, df.index)
for ax, ylabel in zip(axes, df.columns):
assert ax.get_ylabel() == ylabel
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.pie,
subplots=True, labels=labels,
colors=color_args)
assert len(axes) == len(df.columns)
for ax in axes:
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
def test_pie_df_nan(self):
df = DataFrame(np.random.rand(4, 4))
for i in range(4):
df.iloc[i, i] = np.nan
fig, axes = self.plt.subplots(ncols=4)
df.plot.pie(subplots=True, ax=axes, legend=True)
base_expected = ['0', '1', '2', '3']
for i, ax in enumerate(axes):
expected = list(base_expected) # force copy
expected[i] = ''
result = [x.get_text() for x in ax.texts]
assert result == expected
# legend labels
# NaN's not included in legend with subplots
# see https://github.com/pandas-dev/pandas/issues/8390
assert ([x.get_text() for x in ax.get_legend().get_texts()] ==
base_expected[:i] + base_expected[i + 1:])
@slow
def test_errorbar_plot(self):
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
df_err = DataFrame(d_err)
# check line plots
ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err,
kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err['x'], xerr=df_err['x'],
kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
yerr=df_err, xerr=df_err,
subplots=True,
kind=kind)
self._check_has_errorbars(axes, xerr=1, yerr=1)
ax = _check_plot_works((df + 1).plot, yerr=df_err,
xerr=df_err, kind='bar', log=True)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# yerr is raw error values
ax = _check_plot_works(df['y'].plot, yerr=np.ones(12) * 0.4)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is iterator
import itertools
ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is column name
for yerr in ['yerr', u('誤差')]:
s_df = df.copy()
s_df[yerr] = np.ones(12) * 0.2
ax = _check_plot_works(s_df.plot, yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=1)
with pytest.raises(ValueError):
df.plot(yerr=np.random.randn(11))
df_err = DataFrame({'x': ['zzz'] * 12, 'y': ['zzz'] * 12})
with pytest.raises((ValueError, TypeError)):
df.plot(yerr=df_err)
@slow
def test_errorbar_with_integer_column_names(self):
# test with integer column names
df = DataFrame(np.random.randn(10, 2))
df_err = DataFrame(np.random.randn(10, 2))
ax = _check_plot_works(df.plot, yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@slow
def test_errorbar_with_partial_columns(self):
df = DataFrame(np.random.randn(10, 3))
df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ix = date_range('1/1/2000', periods=10, freq='M')
df.set_index(ix, inplace=True)
df_err.set_index(ix, inplace=True)
ax = _check_plot_works(df.plot, yerr=df_err, kind='line')
self._check_has_errorbars(ax, xerr=0, yerr=2)
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12) * 0.2, 'z': np.ones(12) * 0.4}
df_err = DataFrame(d_err)
for err in [d_err, df_err]:
ax = _check_plot_works(df.plot, yerr=err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@slow
def test_errorbar_timeseries(self):
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
# check time-series plots
ix = date_range('1/1/2000', '1/1/2001', freq='M')
tdf = DataFrame(d, index=ix)
tdf_err = DataFrame(d_err, index=ix)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'],
kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(tdf.plot,
kind=kind, yerr=tdf_err,
subplots=True)
self._check_has_errorbars(axes, xerr=0, yerr=1)
def test_errorbar_asymmetrical(self):
np.random.seed(0)
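# err has shape (n_columns, 2, n_points): asymmetrical lower/upper
# errors for each of the 3 columns and 5 data points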
err = np.random.rand(3, 2, 5)
# each column is [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]
df = DataFrame(np.arange(15).reshape(3, 5)).T
data = df.values
ax = df.plot(yerr=err, xerr=err / 2)
if self.mpl_ge_2_0_0:
yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]
expected_0_0 = err[0, :, 0] * np.array([-1, 1])
tm.assert_almost_equal(yerr_0_0, expected_0_0)
else:
assert ax.lines[7].get_ydata()[0] == data[0, 1] - err[1, 0, 0]
assert ax.lines[8].get_ydata()[0] == data[0, 1] + err[1, 1, 0]
assert ax.lines[5].get_xdata()[0] == -err[1, 0, 0] / 2
assert ax.lines[6].get_xdata()[0] == err[1, 1, 0] / 2
with pytest.raises(ValueError):
df.plot(yerr=err.T)
tm.close()
def test_table(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
ax = df.plot()
assert len(ax.tables) == 0
plotting.table(ax, df.T)
assert len(ax.tables) == 1
def test_errorbar_scatter(self):
df = DataFrame(
np.random.randn(5, 2), index=range(5), columns=['x', 'y'])
df_err = DataFrame(np.random.randn(5, 2) / 5,
index=range(5), columns=['x', 'y'])
ax = _check_plot_works(df.plot.scatter, x='x', y='y')
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err,
yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
def _check_errorbar_color(containers, expected, has_err='has_xerr'):
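# Pull the error-bar line artists out of the matplotlib containers and
# check that they all use the expected color.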
lines = []
errs = [c.lines
for c in ax.containers if getattr(c, has_err, False)][0]
for el in errs:
if is_list_like(el):
lines.extend(el)
else:
lines.append(el)
err_lines = [x for x in lines if x in ax.collections]
self._check_colors(
err_lines, linecolors=np.array([expected] * len(err_lines)))
# GH 8081
df = DataFrame(
np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
ax = df.plot.scatter(x='a', y='b', xerr='d', yerr='e', c='red')
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, 'red', has_err='has_xerr')
_check_errorbar_color(ax.containers, 'red', has_err='has_yerr')
ax = df.plot.scatter(x='a', y='b', yerr='e', color='green')
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, 'green', has_err='has_yerr')
@slow
def test_sharex_and_ax(self):
# https://github.com/pandas-dev/pandas/issues/9737: when using gridspec,
# the axes returned by fig.get_axes() are sorted differently than pandas
# expects, so make sure that only the right tick labels are removed
import matplotlib.pyplot as plt
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
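# With sharex, only the bottom row of the grid should show x tick
# labels; y tick labels remain visible on every subplot.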
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[0], axes[2]]:
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[1], axes[3]]:
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
# without sharex, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@slow
def test_sharey_and_ax(self):
# https://github.com/pandas-dev/pandas/issues/9737: when using gridspec,
# the axes returned by fig.get_axes() are sorted differently than pandas
# expects, so make sure that only the right tick labels are removed
import matplotlib.pyplot as plt
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
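# With sharey, only the left column of the grid should show y tick
# labels; x tick labels remain visible on every subplot.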
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
for ax in [axes[0], axes[1]]:
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[2], axes[3]]:
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
# without sharey, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
def test_memory_leak(self):
""" Check that every plot type gets properly collected. """
import weakref
import gc
results = {}
for kind in plotting._core._plot_klass.keys():
if not _ok_for_gaussian_kde(kind):
continue
args = {}
if kind in ['hexbin', 'scatter', 'pie']:
df = self.hexbin_df
args = {'x': 'A', 'y': 'B'}
elif kind == 'area':
df = self.tdf.abs()
else:
df = self.tdf
# Use a weakref so we can see if the object gets collected without
# also preventing it from being collected
results[kind] = weakref.proxy(df.plot(kind=kind, **args))
# have matplotlib delete all the figures
tm.close()
# force a garbage collection
gc.collect()
for key in results:
# check that every plot was collected
with pytest.raises(ReferenceError):
# need to actually access something to get an error
results[key].lines
@slow
def test_df_subplots_patterns_minorticks(self):
# GH 10657
import matplotlib.pyplot as plt
df = DataFrame(np.random.randn(10, 2),
index=date_range('1/1/2000', periods=10),
columns=list('AB'))
# shared subplots
fig, axes = plt.subplots(2, 1, sharex=True)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
fig, axes = plt.subplots(2, 1)
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# not shared
fig, axes = plt.subplots(2, 1)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@slow
def test_df_gridspec_patterns(self):
# GH 10819
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
ts = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10))
df = DataFrame(np.random.randn(10, 2), index=ts.index,
columns=list('AB'))
def _get_vertical_grid():
gs = gridspec.GridSpec(3, 1)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :])
ax2 = fig.add_subplot(gs[2, :])
return ax1, ax2
def _get_horizontal_grid():
gs = gridspec.GridSpec(1, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:, :2])
ax2 = fig.add_subplot(gs[:, 2])
return ax1, ax2
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
ax1 = ts.plot(ax=ax1)
assert len(ax1.lines) == 1
ax2 = df.plot(ax=ax2)
assert len(ax2.lines) == 2
for ax in [ax1, ax2]:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots=True
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
axes = df.plot(subplots=True, ax=[ax1, ax2])
assert len(ax1.lines) == 1
assert len(ax2.lines) == 1
for ax in axes:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
tm.close()
# vertical / subplots / sharex=True / sharey=True
ax1, ax2 = _get_vertical_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True,
sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
for ax in [ax1, ax2]:
# y-axis labels are visible because there is only one column
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of axes0 (top) are hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# horizontal / subplots / sharex=True / sharey=True
ax1, ax2 = _get_horizontal_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True,
sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
self._check_visible(axes[0].get_yticklabels(), visible=True)
# yaxis of axes1 (right) are hidden
self._check_visible(axes[1].get_yticklabels(), visible=False)
for ax in [ax1, ax2]:
# x-axis labels are visible because there is only one row
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# boxed
def _get_boxed_grid():
gs = gridspec.GridSpec(3, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :2])
ax2 = fig.add_subplot(gs[:2, 2])
ax3 = fig.add_subplot(gs[2, :2])
ax4 = fig.add_subplot(gs[2, 2])
return ax1, ax2, ax3, ax4
axes = _get_boxed_grid()
df = DataFrame(np.random.randn(10, 4),
index=ts.index, columns=list('ABCD'))
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
# tick labels are visible because the axes are not shared
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots / sharex=True / sharey=True
axes = _get_boxed_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
for ax in axes:
assert len(ax.lines) == 1
for ax in [axes[0], axes[2]]: # left column
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[1], axes[3]]: # right column
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in [axes[0], axes[1]]: # top row
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[2], axes[3]]: # bottom row
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@slow
def test_df_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(
DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4]}),
plotting._core._dataframe_kinds, kws={'x': 'a', 'y': 'b'})
def test_option_mpl_style(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
set_option('display.mpl_style', 'default')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
set_option('display.mpl_style', None)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
set_option('display.mpl_style', False)
with pytest.raises(ValueError):
set_option('display.mpl_style', 'default2')
def test_invalid_colormap(self):
df = DataFrame(randn(3, 2), columns=['A', 'B'])
with pytest.raises(ValueError):
df.plot(colormap='invalid_colormap')
def test_plain_axes(self):
        # supplied ax itself is a SubplotAxes, but the figure also contains
        # a plain Axes object (GH11556)
fig, ax = self.plt.subplots()
fig.add_axes([0.2, 0.2, 0.2, 0.2])
Series(rand(10)).plot(ax=ax)
        # supplied ax itself is a plain Axes, but because of the cmap keyword
        # a new ax is created for the colorbar -> also multiple axes (GH11520)
df = DataFrame({'a': randn(8), 'b': randn(8)})
fig = self.plt.figure()
ax = fig.add_axes((0, 0, 1, 1))
df.plot(kind='scatter', ax=ax, x='a', y='b', c='a', cmap='hsv')
# other examples
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=cax)
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid.inset_locator import inset_axes
iax = inset_axes(ax, width="30%", height=1., loc=3)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=iax)
def test_passed_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
colormap = mpl.colors.ListedColormap(color_tuples)
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
def test_rcParams_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
try: # mpl 1.5
with mpl.rc_context(
rc={'axes.prop_cycle': mpl.cycler("color", color_tuples)}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
except (AttributeError, KeyError): # mpl 1.4
with mpl.rc_context(rc={'axes.color_cycle': color_tuples}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
def _generate_4_axes_via_gridspec():
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec # noqa
gs = mpl.gridspec.GridSpec(2, 2)
ax_tl = plt.subplot(gs[0, 0])
ax_ll = plt.subplot(gs[1, 0])
ax_tr = plt.subplot(gs[0, 1])
ax_lr = plt.subplot(gs[1, 1])
return gs, [ax_tl, ax_ll, ax_tr, ax_lr]
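def _gridspec_usage_sketch():
    # Illustrative sketch added for clarity; it is not invoked by the tests.
    # It shows how the gridspec helper above is meant to be consumed by
    # DataFrame.plot through the ``ax`` argument (one Axes per column when
    # subplots=True). The random data below is an arbitrary example.
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    _, axes = _generate_4_axes_via_gridspec()
    df = pd.DataFrame(np.random.randn(10, 4), columns=list('ABCD'))
    df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
    plt.close('all')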
| mit |
pysal/pPysal | chol/cholesky.py | 1 | 7491 | from math import sqrt
import time
from cchol import naivechol as cnaive
from cchol import updateelement
import numpy as np
from numpy.random import randint
from numpy.linalg import cholesky
import matplotlib.pyplot as plt
'''
def blaschol(a):
"""
Non-blocked BLAS style cholesky
"""
n = a.shape[0]
for j in range(n):
        # u_{j,j} = \sqrt{A_{j,j} - \sum_{k=1}^{j-1} L^{2}_{j,k}}
if j >= 1:
#Is np.dot() faster or slower than np.sum(a**2)?
a[j,j] = sqrt(a[j,j] - np.dot(a[0:j:,0], a[0:j:,0]))
else:
a[j,j] = sqrt(a[j,j])
if j < n:
for i in range(
#Compute elements of j+1:n of column j
x = a[j:j+1,j+1:]
y = a[j+1:j+2, j+1:]
#A_{i,j} - \sum_{k=1}^{j-1} L_{i,k} * L_{j,k}
# Element wise multiplication.
#The summation is y=\alpha * A * x + \beta y, where x and y are vectors
# A is a submatrix, \alpha = -1 and \beta = 1.0
#This simplifies to y=\alpha * -1 * x + y
#submatrix = a[j+1:,j+1:]
#x =
#y =
#Multiply the submatrix by 1/a[j,j]
a[j+1:] *= (1/a[j,j])
print a
'''
def purepythoncholesky(A):
"""Performs a Cholesky decomposition of A, which must
be a symmetric and positive definite matrix. The function
returns the lower variant triangular matrix, L."""
n = len(A)
# Create zero matrix for L
L = [[0.0] * n for i in xrange(n)]
# Perform the Cholesky decomposition
for i in xrange(n):
for k in xrange(i+1):
tmp_sum = sum(L[i][j] * L[k][j] for j in xrange(k))
if (i == k): # Diagonal elements
# LaTeX: l_{kk} = \sqrt{ a_{kk} - \sum^{k-1}_{j=1} l^2_{kj}}
L[i][k] = sqrt(A[i][i] - tmp_sum)
else:
# LaTeX: l_{ik} = \frac{1}{l_{kk}} \left( a_{ik} - \sum^{k-1}_{j=1} l_{ij} l_{kj} \right)
L[i][k] = (1.0 / L[k][k] * (A[i][k] - tmp_sum))
return L
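# Worked instance of the two formulas above (illustrative addition), using the
# 2x2 SPD matrix [[4, 2], [2, 3]]:
#   l_11 = sqrt(a_11)          = sqrt(4)     = 2
#   l_21 = (a_21 - 0) / l_11   = 2 / 2       = 1
#   l_22 = sqrt(a_22 - l_21^2) = sqrt(3 - 1) = sqrt(2)
# so L = [[2, 0], [1, sqrt(2)]], and L L^T reproduces the input matrix.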
def naivechol(a):
"""
A naive cholesky decomposition using an unoptimized
Cholesky-Banachiewicz implementation.
Reverse i, j to j, i for a Cholesky-Crout implementation.
This implementation operates in-place and is O(n^3/6)
"""
#Cast to float since the random SPD matrix comes in as int
#Algorithm
n = a.shape[0]
for k in range(0, n):
a[k,k] = sqrt(a[k, k])
for i in range(k + 1, n):
a[i, k] = a[i, k] / a[k, k]
for j in range(k + 1, n):
for i in range(j, n):
a[i, j] = a[i, j] - a[i, k] * a[j, k]
    #Since modified in place, set the UT to 0, offset by 1
# from the diagonal
idx = np.triu_indices(n, 1)
a[idx] = 0
return a
def mixedcython(a):
"""
This is a naive implementation with cython slotted in
for the inner for loop.
"""
n = a.shape[0]
for k in range(0, n):
a[k,k] = sqrt(a[k, k])
for i in range(k + 1, n):
a[i, k] = a[i, k] / a[k, k]
for j in range(k + 1, n):
updateelement(a, j, k, n)
idx = np.triu_indices(n, 1)
a[idx] = 0
return a
#@profile
def vectorizedchol(a):
"""
This is the naive chol, but vectorized by column.
"""
n = a.shape[0]
for k in range(0, n):
#Have to recompute every time stepping 'down' diagonal
a[k,k] = sqrt(a[k,k])
#Update the column
a[k+1:,k] = a[k+1:,k] / a[k,k]
        #Update the remainder of the lower triangle, one column at a time
        for j in range(k + 1, n): # For each remaining column
            a[j:, j] = a[j:, j] - (a[j:, k] * a[j, k])
    #Since modified in place, set the UT to 0, offset by 1
# from the diagonal
idx = np.triu_indices(n, 1)
a[idx] = 0
return a
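def _check_small_example():
    """
    Illustrative cross-check added for clarity (not part of the original
    benchmark; call it manually if desired): the pure-Python, naive and
    vectorized variants above should all agree with numpy.linalg.cholesky
    on a small, hand-built SPD matrix.
    """
    spd = np.array([[4.0, 2.0, 0.0],
                    [2.0, 3.0, 1.0],
                    [0.0, 1.0, 5.0]])
    expected = cholesky(spd)
    np.testing.assert_allclose(naivechol(spd.copy()), expected, rtol=1e-7)
    np.testing.assert_allclose(vectorizedchol(spd.copy()), expected, rtol=1e-7)
    np.testing.assert_allclose(np.array(purepythoncholesky(spd.tolist())),
                               expected, rtol=1e-7)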
def cython_implementation(a):
"""
Calls a naive Cython implementation.
"""
cnaive(a)
idx = np.triu_indices(a.shape[0], 1)
a[idx] = 0
return a
def main():
li = [10, 25, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 1000,
1500, 2000, 2500, 3000]
x = np.array(li)
y = np.zeros((6, x.shape[0]))
for i, s in enumerate(li):
print '\n'
print "n = {}".format(s**2)
print '---------------------------'
a = np.asfortranarray(randint(0,10,size=(s, s)), dtype='d')
#Conversion using copy from int64 to float64
#Dot should ensure positive semi-definite
data = np.dot(a, a.T)
del a
#Numpy Cholesky Decomposition (Dense Matrix)
spd_matrix = data.copy()
t1 = time.time()
np_chol = cholesky(spd_matrix)
t2 = time.time()
print "Numpy Implementation took {} seconds".format(t2-t1)
y[0][i] = t2-t1
if s <= 200:
#Naive
spd_matrix = data.copy()
t1 = time.time()
out = naivechol(spd_matrix)
t2 = time.time()
np.testing.assert_allclose(out, np_chol, rtol=1e-07)
print "Naive implementation took {} seconds".format(t2-t1)
y[1][i] = t2-t1
else:
y[1][i] = np.nan
#Cython
spd_matrix = data.copy()
t1 = time.time()
out = cython_implementation(spd_matrix)
t2 = time.time()
#np.testing.assert_allclose(out, np_chol, rtol=1e-07)
print "Cython implementation took {} seconds".format(t2-t1)
y[2][i] = t2-t1
if s <= 500:
#Vectorized
spd_matrix = data.copy()
t1 = time.time()
out = vectorizedchol(spd_matrix)
t2 = time.time()
#np.testing.assert_allclose(out, np_chol, rtol=1e-07)
print "Vectorized implementation took {} seconds".format(t2-t1)
y[3][i] = t2-t1
else:
y[3][i] = np.nan
if s <= 500:
#Mixed Naive / Cython
spd_matrix = data.copy()
t1 = time.time()
out = mixedcython(spd_matrix)
t2 = time.time()
np.testing.assert_allclose(out, np_chol, rtol=1e-07)
print "Mixed Cython/Python implementation took {} seconds".format(t2-t1)
y[4][i] = t2 - t1
else:
y[4][i] = np.nan
if s <=500:
#new cholesky
spd_matrix = data.copy()
t1 = time.time()
out = purepythoncholesky(spd_matrix)
t2 = time.time()
print "Pure Python Cholesky implementation took {} seconds".format(t2-t1)
y[5][i] = t2-t1
else:
y[5][i] = np.nan
#plot
    labels = ['NP', 'Naive', 'Cython', 'Vect', 'Mixed C/P', 'Pure Python']
for i in range(y.shape[0]):
plt.plot(x, y[i], label=labels[i])
plt.show()
def prof():
s = 500
a = np.zeros(shape=(s,s), dtype=np.float64, order='F')
a[:] = randint(0,10,size=(s, s))
#Conversion using copy from int64 to float64
a = a.astype(np.float64, copy=False, order='F')
#Dot should ensure positive semi-definite
data = np.dot(a, a.T)
vectorizedchol(data)
def testvectorized():
b = np.array([[9,9,6,-1],[9,30,5,22],[6,5,10,0],[-1,22,0,36.0]], order='F')
print vectorizedchol(b)
def testblaschol():
b = np.array([[9,9,6,-1],[9,30,5,22],[6,5,10,0],[-1,22,0,36.0]], order='F')
blaschol(b)
if __name__ == '__main__':
#prof()
main()
#testvectorized()
#testblaschol()
| bsd-3-clause |
pianomania/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 161 | 1380 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
unsiloai/syntaxnet-ops-hack | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 92 | 4535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
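# Minimal illustrative sketch (an addition for clarity, not part of the
# original module): what extract_pandas_data accepts and rejects. The column
# names and values below are arbitrary examples.
def _extract_pandas_data_example():
  if not HAS_PANDAS:
    return None
  ok = pd.DataFrame({'ints': [1, 2], 'flags': [True, False]})
  values = extract_pandas_data(ok)  # float ndarray of shape (2, 2)
  try:
    extract_pandas_data(pd.DataFrame({'labels': ['a', 'b']}))
  except ValueError:
    pass  # object (string) columns are rejected, as documented above
  return values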
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
| apache-2.0 |
joshbohde/scikit-learn | examples/grid_search_text_feature_extraction.py | 1 | 4106 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader, or set them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__analyzer__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__analyzer__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
print __doc__
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: Simplified BSD
from pprint import pprint
from time import time
import os
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model.sparse import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
################################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print "Loading 20 newsgroups dataset for categories:"
print categories
data = fetch_20newsgroups(subset='train', categories=categories)
print "%d documents" % len(data.filenames)
print "%d categories" % len(data.target_names)
print
################################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
parameters = {
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
'vect__max_df': (0.5, 0.75, 1.0),
# 'vect__max_features': (None, 5000, 10000, 50000),
'vect__analyzer__max_n': (1, 2), # words or bigrams
# 'tfidf__use_idf': (True, False),
# 'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
# 'clf__n_iter': (10, 50, 80),
}
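# Illustrative aside (added for clarity): with only the uncommented settings
# above, the grid has 3 * 2 * 2 * 2 = 24 parameter combinations, and each one
# is refit for every cross-validation fold.
n_candidates = 1
for param_options in parameters.values():
    n_candidates *= len(param_options)
print "grid size: %d parameter combinations" % n_candidates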
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# cross-validation doesn't work if the length of the data is not known,
# hence use lists instead of iterators
text_docs = [open(f).read() for f in data.filenames]
print "Performing grid search..."
print "pipeline:", [name for name, _ in pipeline.steps]
print "parameters:"
pprint(parameters)
t0 = time()
grid_search.fit(text_docs, data.target)
print "done in %0.3fs" % (time() - t0)
print
print "Best score: %0.3f" % grid_search.best_score
print "Best parameters set:"
best_parameters = grid_search.best_estimator._get_params()
for param_name in sorted(parameters.keys()):
print "\t%s: %r" % (param_name, best_parameters[param_name])
| bsd-3-clause |
brunojulia/ultracoldUB | brightsolitons/gpe_bs_plots.py | 1 | 2131 | # coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
def plot_convergence(x,y,n):
f2=plt.figure()
plt.title('Convergence',fontsize=15)
plt.xlabel('time ($t \, \\omega_{\\xi}$)',fontsize=15)
plt.ylabel('Energy per particle ($E/\\hbar \,\\omega_{\\xi}$)',fontsize=15)
plt.xticks(np.arange(0, x[n]+1,x[n]/5))
plt.locator_params('y',nbins=3)
plt.plot(x, y[:,0], 'r-',label="$E_{med}$") # plot only average energy
#plt.plot(x, y[:,2], 'b-',label="$E_{kin}$")
#plt.plot(x, y[:,3], 'g-',label="$E_{pot}$")
#plt.plot(x, y[:,4], 'y-',label="$E_{int}$")
#plt.plot(x, y[:,1], 'm',label="$\\mu$")
#plt.legend(fontsize=15)
f2.show()
def plot_wave_function(x,y):
f3=plt.figure()
plt.title('Wave Function Integral',fontsize=15)
plt.xlabel('time ($t \, \\omega_{\\xi}$)',fontsize=15)
# plt.ylabel(' ',fontsize=15)
plt.plot(x, y[:,0], 'r-', label='left side')
plt.plot(x, y[:,1], 'g-', label='inside')
plt.plot(x, y[:,2], 'b-', label='right side')
plt.legend(fontsize=15)
f3.show()
def plot_density(z,psi,Lz,t):
f4=plt.figure()
plt.title('State at $t \,\\omega_{ho}=%g$'%(t),fontsize=15)
plt.xlabel('$x/a_{ho}$',fontsize=15)
plt.xticks(np.arange(-Lz, Lz+1,Lz/2))
plt.locator_params('y',nbins=3)
plt.plot(z, abs(psi)**2, 'b-',label='$|\psi|^2$') # plot density
plt.legend(fontsize=15)
f4.show()
def plot_phase(z,psi,Lz,t):
f5=plt.figure()
plt.title('State at $t \,\\omega_{ho}=%g$'%(t),fontsize=15)
plt.xlabel('$x/a_{ho}$',fontsize=15)
plt.xticks(np.arange(-Lz, Lz+1,Lz/2))
plt.locator_params('y',nbins=3)
plt.plot(z, np.angle(psi), 'b.',label='$Arg(\psi)$')
plt.legend(fontsize=15)
f5.show()
def plot_real_imag(z,psi,Lz,t):
f6=plt.figure()
plt.title('State at $t \,\\omega_{ho}=%g$'%(t),fontsize=15)
plt.xlabel('$x/a_{ho}$',fontsize=15)
plt.xticks(np.arange(-Lz, Lz+1,Lz/2))
plt.locator_params('y',nbins=3)
plt.plot(z, psi.real, 'r.',label='real$(\psi)$')
plt.plot(z, psi.imag, 'b--',label='imag$(\psi)$')
plt.legend(fontsize=15)
f6.show()
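def _demo_plots():
    """
    Minimal usage sketch (an illustrative addition, not part of the original
    module): build a normalized Gaussian trial state on a grid and feed it to
    the helpers above. The grid size and width are arbitrary examples.
    """
    Lz = 10.0
    z = np.linspace(-Lz, Lz, 256)
    psi = np.exp(-z ** 2 / 2.0) * np.exp(1j * 0.5 * z)  # complex trial state
    psi = psi / np.sqrt(np.trapz(abs(psi) ** 2, z))     # normalize to 1
    plot_density(z, psi, Lz, 0.0)
    plot_phase(z, psi, Lz, 0.0)
    plot_real_imag(z, psi, Lz, 0.0)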
| gpl-3.0 |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/measure_resolution/lmfit-py/lmfit/ui/basefitter.py | 7 | 12312 | import warnings
import numpy as np
from ..model import Model
from ..models import ExponentialModel # arbitrary default
from ..asteval import Interpreter
from ..astutils import NameFinder
from ..parameter import check_ast_errors
_COMMON_DOC = """
This an interactive container for fitting models to particular data.
It maintains the attributes `current_params` and `current_result`. When
its fit() method is called, the best fit becomes the new `current_params`.
The most basic usage is iteratively fitting data, taking advantage of
this stateful memory that keeps the parameters between each fit.
"""
_COMMON_EXAMPLES_DOC = """
Examples
--------
>>> fitter = Fitter(data, model=SomeModel, x=x)
>>> fitter.model
# This property can be changed, to try different models on the same
# data with the same independent vars.
# (This is especially handy in the notebook.)
>>> fitter.current_params
# This copy of the model's Parameters is updated after each fit.
>>> fitter.fit()
# Perform a fit using fitter.current_params as a guess.
# Optionally, pass a params argument or individual keyword arguments
# to override current_params.
>>> fitter.current_result
# This is the result of the latest fit. It contains the usual
# copies of the Parameters, in the attributes params and init_params.
>>> fitter.data = new_data
# If this property is updated, the `current_params` are retained and used
# as an initial guess if fit() is called again.
"""
class BaseFitter(object):
__doc__ = _COMMON_DOC + """
Parameters
----------
data : array-like
model : lmfit.Model
        optional initial Model to use; may be set or changed later
""" + _COMMON_EXAMPLES_DOC
def __init__(self, data, model=None, **kwargs):
self._data = data
self.kwargs = kwargs
# GUI-based subclasses need a default value for the menu of models,
# and so an arbitrary default is applied here, for uniformity
# among the subclasses.
if model is None:
model = ExponentialModel
self.model = model
def _on_model_value_change(self, name, value):
self.model = value
def _on_fit_button_click(self, b):
self.fit()
def _on_guess_button_click(self, b):
self.guess()
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def model(self):
return self._model
@model.setter
def model(self, value):
if callable(value):
model = value()
else:
model = value
self._model = model
self.current_result = None
self._current_params = model.make_params()
# Use these to evaluate any Parameters that use expressions.
self.asteval = Interpreter()
self.namefinder = NameFinder()
self._finalize_model(value)
self.guess()
def _finalize_model(self, value):
# subclasses optionally override to update display here
pass
@property
def current_params(self):
"""Each time fit() is called, these will be updated to reflect
the latest best params. They will be used as the initial guess
for the next fit, unless overridden by arguments to fit()."""
return self._current_params
@current_params.setter
def current_params(self, new_params):
# Copy contents, but retain original params objects.
for name, par in new_params.items():
self._current_params[name].value = par.value
self._current_params[name].expr = par.expr
self._current_params[name].vary = par.vary
self._current_params[name].min = par.min
self._current_params[name].max = par.max
# Compute values for expression-based Parameters.
self.__assign_deps(self._current_params)
for _, par in self._current_params.items():
if par.value is None:
self.__update_paramval(self._current_params, par.name)
self._finalize_params()
def _finalize_params(self):
# subclasses can override this to pass params to display
pass
def guess(self):
count_indep_vars = len(self.model.independent_vars)
guessing_successful = True
try:
if count_indep_vars == 0:
guess = self.model.guess(self._data)
elif count_indep_vars == 1:
key = self.model.independent_vars[0]
val = self.kwargs[key]
d = {key: val}
guess = self.model.guess(self._data, **d)
except NotImplementedError:
guessing_successful = False
self.current_params = guess
return guessing_successful
def __assign_deps(self, params):
# N.B. This does not use self.current_params but rather
# new Parameters that are being built by self.guess().
for name, par in params.items():
if par.expr is not None:
par.ast = self.asteval.parse(par.expr)
check_ast_errors(self.asteval.error)
par.deps = []
self.namefinder.names = []
self.namefinder.generic_visit(par.ast)
for symname in self.namefinder.names:
if (symname in self.current_params and
symname not in par.deps):
par.deps.append(symname)
self.asteval.symtable[name] = par.value
if par.name is None:
par.name = name
def __update_paramval(self, params, name):
# N.B. This does not use self.current_params but rather
# new Parameters that are being built by self.guess().
par = params[name]
if getattr(par, 'expr', None) is not None:
if getattr(par, 'ast', None) is None:
par.ast = self.asteval.parse(par.expr)
if par.deps is not None:
for dep in par.deps:
self.__update_paramval(params, dep)
par.value = self.asteval.run(par.ast)
out = check_ast_errors(self.asteval.error)
if out is not None:
self.asteval.raise_exception(None)
self.asteval.symtable[name] = par.value
def fit(self, *args, **kwargs):
"Use current_params unless overridden by arguments passed here."
guess = dict(self.current_params)
guess.update(self.kwargs) # from __init__, e.g. x=x
guess.update(kwargs)
self.current_result = self.model.fit(self._data, *args, **guess)
self.current_params = self.current_result.params
class MPLFitter(BaseFitter):
# This is a small elaboration on BaseModel; it adds a plot()
# method that depends on matplotlib. It adds several plot-
# styling arguments to the signature.
__doc__ = _COMMON_DOC + """
Parameters
----------
data : array-like
model : lmfit.Model
        optional initial Model to use; may be set or changed later
Additional Parameters
---------------------
axes_style : dictionary representing style keyword arguments to be
passed through to `Axes.set(...)`
data_style : dictionary representing style keyword arguments to be passed
through to the matplotlib `plot()` command the plots the data points
init_style : dictionary representing style keyword arguments to be passed
through to the matplotlib `plot()` command the plots the initial fit
line
best_style : dictionary representing style keyword arguments to be passed
through to the matplotlib `plot()` command the plots the best fit
line
**kwargs : independent variables or extra arguments, passed like `x=x`
""" + _COMMON_EXAMPLES_DOC
def __init__(self, data, model=None, axes_style={},
data_style={}, init_style={}, best_style={}, **kwargs):
self.axes_style = axes_style
self.data_style = data_style
self.init_style = init_style
self.best_style = best_style
super(MPLFitter, self).__init__(data, model, **kwargs)
def plot(self, axes_style={}, data_style={}, init_style={}, best_style={},
ax=None):
"""Plot data, initial guess fit, and best fit.
Optional style arguments pass keyword dictionaries through to their
respective components of the matplotlib plot.
Precedence is:
1. arguments passed to this function, plot()
2. arguments passed to the Fitter when it was first declared
3. hard-coded defaults
Parameters
---------------------
axes_style : dictionary representing style keyword arguments to be
passed through to `Axes.set(...)`
data_style : dictionary representing style keyword arguments to be passed
through to the matplotlib `plot()` command the plots the data points
init_style : dictionary representing style keyword arguments to be passed
through to the matplotlib `plot()` command the plots the initial fit
line
best_style : dictionary representing style keyword arguments to be passed
through to the matplotlib `plot()` command the plots the best fit
line
ax : matplotlib.Axes
optional `Axes` object. Axes will be generated if not provided.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib is required to use this Fitter. "
"Use BaseFitter or a subclass thereof "
"that does not depend on matplotlib.")
# Configure style
_axes_style= dict() # none, but this is here for possible future use
_axes_style.update(self.axes_style)
_axes_style.update(axes_style)
_data_style= dict(color='blue', marker='o', linestyle='none')
_data_style.update(**_normalize_kwargs(self.data_style, 'line2d'))
_data_style.update(**_normalize_kwargs(data_style, 'line2d'))
_init_style = dict(color='gray')
_init_style.update(**_normalize_kwargs(self.init_style, 'line2d'))
_init_style.update(**_normalize_kwargs(init_style, 'line2d'))
_best_style= dict(color='red')
_best_style.update(**_normalize_kwargs(self.best_style, 'line2d'))
_best_style.update(**_normalize_kwargs(best_style, 'line2d'))
if ax is None:
fig, ax = plt.subplots()
count_indep_vars = len(self.model.independent_vars)
if count_indep_vars == 0:
ax.plot(self._data, **_data_style)
elif count_indep_vars == 1:
indep_var = self.kwargs[self.model.independent_vars[0]]
ax.plot(indep_var, self._data, **_data_style)
else:
raise NotImplementedError("Cannot plot models with more than one "
"indepedent variable.")
result = self.current_result # alias for brevity
if not result:
ax.set(**_axes_style)
return # short-circuit the rest of the plotting
if count_indep_vars == 0:
ax.plot(result.init_fit, **_init_style)
ax.plot(result.best_fit, **_best_style)
elif count_indep_vars == 1:
ax.plot(indep_var, result.init_fit, **_init_style)
ax.plot(indep_var, result.best_fit, **_best_style)
ax.set(**_axes_style)
def _normalize_kwargs(kwargs, kind='patch'):
"""Convert matplotlib keywords from short to long form."""
# Source:
# github.com/tritemio/FRETBursts/blob/fit_experim/fretbursts/burst_plot.py
if kind == 'line2d':
long_names = dict(c='color', ls='linestyle', lw='linewidth',
mec='markeredgecolor', mew='markeredgewidth',
mfc='markerfacecolor', ms='markersize',)
elif kind == 'patch':
long_names = dict(c='color', ls='linestyle', lw='linewidth',
ec='edgecolor', fc='facecolor',)
for short_name in long_names:
if short_name in kwargs:
kwargs[long_names[short_name]] = kwargs.pop(short_name)
return kwargs
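def _normalize_kwargs_examples():
    # Illustrative sanity checks added for clarity; not used by the fitters
    # themselves. They show the short-to-long keyword conversion performed by
    # _normalize_kwargs for the two supported kinds.
    assert _normalize_kwargs({'c': 'red', 'lw': 2}, 'line2d') == \
        {'color': 'red', 'linewidth': 2}
    assert _normalize_kwargs({'ec': 'k', 'fc': 'w'}, 'patch') == \
        {'edgecolor': 'k', 'facecolor': 'w'}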
| apache-2.0 |
yebrahim/pydatalab | google/datalab/bigquery/_schema.py | 5 | 12449 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements Table and View Schema APIs."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from builtins import range
from past.builtins import basestring
from builtins import object
import datetime
import pandas
import pprint
class SchemaField(object):
""" Represents a single field in a Table schema.
This has the properties:
  - name: the flattened, fully-qualified name of the field.
- type: the type of the field as a string ('INTEGER', 'BOOLEAN', 'FLOAT', 'STRING'
or 'TIMESTAMP').
- mode: the mode of the field; 'NULLABLE' by default.
- description: a description of the field, if known; empty string by default.
"""
def __init__(self, name, type, mode='NULLABLE', description=''):
self.name = name
self.type = type
self.mode = mode
self.description = description
def _repr_sql_(self):
"""Returns a representation of the field for embedding into a SQL statement.
Returns:
A formatted field name for use within SQL statements.
"""
return self.name
def __eq__(self, other):
""" Compare two schema field objects for equality (ignoring description). """
return self.name == other.name and self.type == other.type\
and self.mode == other.mode
def __repr__(self):
""" Returns the schema field as a string form of a dictionary. """
return 'BigQuery Schema Field:\n%s' % pprint.pformat(vars(self), width=1)
def __getitem__(self, item):
# TODO(gram): Currently we need this for a Schema object to work with the Parser object.
# Eventually if we change Parser to only work with Schema (and not also with the
# schema dictionaries in query results) we can remove this.
if item == 'name':
return self.name
if item == 'type':
return self.type
if item == 'mode':
return self.mode
if item == 'description':
return self.description
class Schema(list):
"""Represents the schema of a BigQuery table as a flattened list of objects representing fields.
Each field object has name, type, mode and description properties. Nested fields
  get flattened with their fully-qualified names. So a Schema that has an object A with nested
field B will be represented as [(name: 'A', ...), (name: 'A.b', ...)].
"""
@staticmethod
def _from_dataframe(dataframe, default_type='STRING'):
"""
Infer a BigQuery table schema from a Pandas dataframe. Note that if you don't explicitly set
the types of the columns in the dataframe, they may be of a type that forces coercion to
STRING, so even though the fields in the dataframe themselves may be numeric, the type in the
derived schema may not be. Hence it is prudent to make sure the Pandas dataframe is typed
correctly.
Args:
dataframe: The DataFrame.
default_type : The default big query type in case the type of the column does not exist in
the schema. Defaults to 'STRING'.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
type_mapping = {
'i': 'INTEGER',
'b': 'BOOLEAN',
'f': 'FLOAT',
'O': 'STRING',
'S': 'STRING',
'U': 'STRING',
'M': 'TIMESTAMP'
}
fields = []
for column_name, dtype in dataframe.dtypes.iteritems():
fields.append({'name': column_name,
'type': type_mapping.get(dtype.kind, default_type)})
return fields
@staticmethod
def _get_field_entry(name, value):
entry = {'name': name}
if isinstance(value, datetime.datetime):
_type = 'TIMESTAMP'
elif isinstance(value, datetime.date):
_type = 'DATE'
elif isinstance(value, datetime.time):
_type = 'TIME'
elif isinstance(value, bool):
_type = 'BOOLEAN'
elif isinstance(value, float):
_type = 'FLOAT'
elif isinstance(value, int):
_type = 'INTEGER'
elif isinstance(value, dict) or isinstance(value, list):
_type = 'RECORD'
entry['fields'] = Schema._from_record(value)
else:
_type = 'STRING'
entry['type'] = _type
return entry
@staticmethod
def _from_dict_record(data):
"""
Infer a BigQuery table schema from a dictionary. If the dictionary has entries that
are in turn OrderedDicts these will be turned into RECORD types. Ideally this will
be an OrderedDict but it is not required.
Args:
data: The dict to infer a schema from.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
return [Schema._get_field_entry(name, value) for name, value in list(data.items())]
@staticmethod
def _from_list_record(data):
"""
Infer a BigQuery table schema from a list of values.
Args:
data: The list of values.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
return [Schema._get_field_entry('Column%d' % (i + 1), value) for i, value in enumerate(data)]
@staticmethod
def _from_record(data):
"""
    Infer a BigQuery table schema from a list of fields or a dictionary. The type of the elements
    is used. For a list, the field names are simply 'Column1', 'Column2', etc.
Args:
data: The list of fields or dictionary.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
if isinstance(data, dict):
return Schema._from_dict_record(data)
elif isinstance(data, list):
return Schema._from_list_record(data)
else:
raise Exception('Cannot create a schema from record %s' % str(data))
@staticmethod
def from_record(source):
"""
Infers a table/view schema from a single record that can contain a list of fields or a
dictionary of fields. The type of the elements is used for the types in the schema. For a
dict, key names are used for column names while for a list, the field names are simply named
'Column1', 'Column2', etc. Note that if using a dict you may want to use an OrderedDict
to ensure column ordering is deterministic.
Args:
source: The list of field values or dictionary of key/values.
Returns:
A Schema for the data.
"""
# TODO(gram): may want to allow an optional second argument which is a list of field
# names; could be useful for the record-containing-list case.
return Schema(Schema._from_record(source))
@staticmethod
def from_data(source):
"""Infers a table/view schema from its JSON representation, a list of records, or a Pandas
dataframe.
Args:
source: the Pandas Dataframe, a dictionary representing a record, a list of heterogeneous
data (record) or homogeneous data (list of records) from which to infer the schema, or
a definition of the schema as a list of dictionaries with 'name' and 'type' entries
and possibly 'mode' and 'description' entries. Only used if no data argument was provided.
'mode' can be 'NULLABLE', 'REQUIRED' or 'REPEATED'. For the allowed types, see:
https://cloud.google.com/bigquery/preparing-data-for-bigquery#datatypes
Note that there is potential ambiguity when passing a list of lists or a list of
dicts between whether that should be treated as a list of records or a single record
that is a list. The heuristic used is to check the length of the entries in the
list; if they are equal then a list of records is assumed. To avoid this ambiguity
you can instead use the Schema.from_record method which assumes a single record,
in either list of values or dictionary of key-values form.
Returns:
A Schema for the data.
"""
if isinstance(source, pandas.DataFrame):
bq_schema = Schema._from_dataframe(source)
elif isinstance(source, list):
if len(source) == 0:
bq_schema = source
elif all(isinstance(d, dict) for d in source):
if all('name' in d and 'type' in d for d in source):
# It looks like a bq_schema; use it as-is.
bq_schema = source
elif all(len(d) == len(source[0]) for d in source):
bq_schema = Schema._from_dict_record(source[0])
else:
raise Exception(('Cannot create a schema from heterogeneous list %s; perhaps you meant ' +
'to use Schema.from_record?') % str(source))
elif isinstance(source[0], list) and \
all([isinstance(l, list) and len(l) == len(source[0]) for l in source]):
# A list of lists all of the same length; treat first entry as a list record.
bq_schema = Schema._from_record(source[0])
else:
# A heterogeneous list; treat as a record.
raise Exception(('Cannot create a schema from heterogeneous list %s; perhaps you meant ' +
'to use Schema.from_record?') % str(source))
elif isinstance(source, dict):
bq_schema = Schema._from_record(source)
else:
raise Exception('Cannot create a schema from %s' % str(source))
return Schema(bq_schema)
def __init__(self, definition=None):
"""Initializes a Schema from its raw JSON representation, a Pandas Dataframe, or a list.
Args:
definition: a definition of the schema as a list of dictionaries with 'name' and 'type'
entries and possibly 'mode' and 'description' entries. Only used if no data argument was
provided. 'mode' can be 'NULLABLE', 'REQUIRED' or 'REPEATED'. For the allowed types, see:
https://cloud.google.com/bigquery/preparing-data-for-bigquery#datatypes
"""
super(Schema, self).__init__()
self._map = {}
self._bq_schema = definition
self._populate_fields(definition)
def __getitem__(self, key):
"""Provides ability to lookup a schema field by position or by name.
"""
if isinstance(key, basestring):
return self._map.get(key, None)
# noinspection PyCallByClass
return list.__getitem__(self, key)
def _add_field(self, name, type, mode='NULLABLE', description=''):
field = SchemaField(name, type, mode, description)
self.append(field)
self._map[name] = field
def find(self, name):
""" Get the index of a field in the flattened list given its (fully-qualified) name.
Args:
name: the fully-qualified name of the field.
Returns:
The index of the field, if found; else -1.
"""
for i in range(0, len(self)):
if self[i].name == name:
return i
return -1
def _populate_fields(self, data, prefix=''):
for field_data in data:
name = prefix + field_data['name']
type = field_data['type']
self._add_field(name, type, field_data.get('mode', None),
field_data.get('description', None))
if type == 'RECORD':
# Recurse into the nested fields, using this field's name as a prefix.
self._populate_fields(field_data.get('fields'), name + '.')
def __repr__(self):
""" Returns a string representation of the schema for notebooks."""
return 'BigQuery Schema - Fields:\n%s' % pprint.pformat(self._bq_schema, width=1)
def __eq__(self, other):
""" Compares two schema for equality. """
other_map = other._map
if len(self._map) != len(other_map):
return False
for name in self._map.keys():
if name not in other_map:
return False
if not self._map[name] == other_map[name]:
return False
return True
def __ne__(self, other):
""" Compares two schema for inequality. """
return not(self.__eq__(other))
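# Illustrative sketch (an addition for clarity, not part of the original
# module): how record-based schema inference maps Python values to BigQuery
# types. The record below is an arbitrary example.
def _schema_inference_example():
  record = {'name': 'abc', 'count': 3, 'score': 1.5,
            'when': datetime.datetime(2017, 1, 1)}
  schema = Schema.from_record(record)
  # e.g. 'count' -> INTEGER, 'score' -> FLOAT, 'when' -> TIMESTAMP, 'name' -> STRING
  return [(field.name, field.type) for field in schema]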
| apache-2.0 |
aabadie/scikit-learn | sklearn/ensemble/forest.py | 5 | 66535 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
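def _oob_indices_sketch(random_state=0, n_samples=10):
    """Illustrative sketch (an addition for clarity, not used by the forests):
    because both helpers above draw from the same seeded random state, the
    unsampled (out-of-bag) rows are exactly the complement of the rows that
    appear in the bootstrap sample."""
    sampled = _generate_sample_indices(random_state, n_samples)
    unsampled = _generate_unsampled_indices(random_state, n_samples)
    assert set(sampled) | set(unsampled) == set(range(n_samples))
    assert len(set(sampled) & set(unsampled)) == 0
    return sampled, unsampled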
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity;
            # [:, np.newaxis] would not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'subsample', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated in 0.17 and"
"will be removed in 0.19. It was replaced by "
"class_weight='balanced_subsample' using the balanced"
"strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
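    # Illustrative sketch (not part of the original module): the "balanced"
    # weights recommended in the warm_start warning above can be computed
    # with scikit-learn's public helper, giving
    # n_samples / (n_classes * np.bincount(y)) per class:
    #
    #     >>> from sklearn.utils.class_weight import compute_class_weight
    #     >>> import numpy as np
    #     >>> y = np.array([0, 0, 0, 1])
    #     >>> compute_class_weight("balanced", np.unique(y), y)
    #     array([ 0.66666667,  2.        ])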
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
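    # Illustrative sketch (not part of the original module): for a
    # hypothetical fitted single-output forest ``clf``, ``predict`` is
    # equivalent to taking the argmax of the averaged class probabilities and
    # mapping it back through ``classes_``:
    #
    #     >>> proba = clf.predict_proba(X)                       # doctest: +SKIP
    #     >>> np.array_equal(clf.predict(X),
    #     ...                clf.classes_.take(np.argmax(proba, axis=1)))
    #     True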
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
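def _example_oob_score():
    # Illustrative sketch, not part of the original API: fit the
    # RandomForestClassifier defined later in this module with bootstrap
    # sampling and read back the out-of-bag estimates filled in by
    # ForestClassifier._set_oob_score above. The toy data is an assumption.
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(200, 5)
    y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
    clf = RandomForestClassifier(n_estimators=50, oob_score=True,
                                 bootstrap=True, random_state=0)
    clf.fit(X, y)
    print(clf.oob_score_)                    # accuracy on out-of-bag samples
    print(clf.oob_decision_function_.shape)  # (n_samples, n_classes)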
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
        The class labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
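def _example_random_forest_classifier():
    # Illustrative sketch, not part of the original API: basic use of the
    # RandomForestClassifier defined above. The toy dataset is an assumption.
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(100, 4)
    y = (X[:, 0] > 0.5).astype(int)
    clf = RandomForestClassifier(n_estimators=25, random_state=0).fit(X, y)
    print(clf.predict(X[:5]))        # highest mean-probability class labels
    print(clf.predict_proba(X[:5]))  # probabilities averaged over the trees
    print(clf.feature_importances_)  # higher values mean more important features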
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
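def _example_random_forest_regressor():
    # Illustrative sketch, not part of the original API: the
    # RandomForestRegressor defined above averages the predictions of its
    # trees; with the default bootstrap=True an out-of-bag R^2 estimate is
    # also available. The toy data is an assumption.
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=100)
    reg = RandomForestRegressor(n_estimators=25, oob_score=True,
                                random_state=0).fit(X, y)
    print(reg.predict(X[:5]))  # mean prediction over the 25 trees
    print(reg.oob_score_)      # out-of-bag R^2 estimate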
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
        The class labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
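def _example_extra_trees_classifier():
    # Illustrative sketch, not part of the original API: the
    # ExtraTreesClassifier defined above is used like RandomForestClassifier,
    # but split thresholds are drawn at random and bootstrap defaults to
    # False. The toy data is an assumption.
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(100, 4)
    y = (X[:, 0] + X[:, 2] > 1.0).astype(int)
    clf = ExtraTreesClassifier(n_estimators=25, random_state=0).fit(X, y)
    print(clf.score(X, y))  # mean accuracy on the training data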
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
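def _example_random_trees_embedding():
    # Illustrative sketch, not part of the original API: transform a toy
    # dataset into the sparse one-hot leaf coding described in the
    # RandomTreesEmbedding docstring. With max_leaf_nodes=None the output
    # dimensionality is bounded by n_estimators * 2 ** max_depth.
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(50, 2)
    embedder = RandomTreesEmbedding(n_estimators=10, max_depth=3,
                                    random_state=0)
    X_sparse = embedder.fit_transform(X)
    print(X_sparse.shape)                    # (50, n_out)
    print(X_sparse.shape[1] <= 10 * 2 ** 3)  # True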
| bsd-3-clause |
IMAMBAKS/data_viz_pa | falcon_server.py | 1 | 5739 | # NOTE THIS FILE SHOULD BE REFACTORED!!!
# sample.py
import falcon
import pandas as pd
from falcon_cors import CORS
from waitress import serve
cors = CORS(allow_origins_list=['http://localhost:3000/', 'http://localhost:*', 'http://localhost:3000'])
df = pd.read_hdf('log_relatics.h5')
def inter_vs_extern_workspaces(parameter, *args, freq: str = 'W') -> pd.DataFrame:
if args:
df0 = df[parameter:args[0]]
else:
df0 = df[parameter]
df0 = df0[['user_name', 'user_email']]
df2 = df0.dropna(axis=0)
df3 = df2.groupby('user_name')['user_email'].unique() # type -> pd.Series
df3 = df3.apply(lambda x: x[0])
df0['user_email'] = df0['user_name'].map(df3)
df0['scope'] = df0['user_email'].apply(lambda x: 'intern' if 'arcadis' in str(x).lower() else 'extern')
name = df0.groupby([pd.Grouper(freq='M'), 'scope'])
query = name.user_name.nunique()
new = query.unstack().reset_index()
return new
def workspace_activity_in_time(parameter, *args, freq: str = 'W') -> pd.DataFrame:
if args:
df2 = df[parameter:args[0]]
else:
df2 = df[parameter]
# df2 = df2[df2.workspace_name != 'Zuidas_SEM']
name = df2.groupby([pd.Grouper(freq=freq), 'workspace_name']).apply(
lambda x: x.user_name.nunique() if x.user_name.nunique() > 0 else None).dropna(axis=0)
name = name.rename("value")
return name
def get_year(parameter, *args, freq: str = 'W') -> pd.DataFrame:
if args:
df2 = df[parameter:args[0]]
else:
df2 = df[parameter]
name = df2.groupby(pd.Grouper(freq=freq))['user_name'].unique()
return name
def get_top_ten_users(parameter, *args) -> pd.DataFrame:
if args:
df2 = df[parameter:args[0]]
else:
df2 = df[parameter]
name = df2.groupby('user_name')['user_name'].count().sort_values(ascending=True)[-10:]
return name
def get_top_ten_workspaces(parameter, *args) -> pd.DataFrame:
if args:
df2 = df[parameter:args[0]]
else:
df2 = df[parameter]
name = df2.groupby('workspace_name')['user_name'].nunique().sort_values(ascending=True)[-10:]
return name
class UserActivity:
def on_get(self, req, resp):
date1 = req.get_param('date1')
freq = req.get_param('freq')
if req.get_param('date2') is not None:
date2 = req.get_param('date2')
try:
query = get_year(date1, date2, freq=freq).to_json(date_format='epoch')
except:
query = ''
else:
try:
query = get_year(date1, freq=freq).to_json(date_format='epoch')
except:
query = ''
resp.body = query
class ExternalInternalUsers:
def on_get(self, req, resp):
date1 = req.get_param('date1')
freq = req.get_param('freq')
if req.get_param('date2') is not None:
date2 = req.get_param('date2')
try:
query = inter_vs_extern_workspaces(date1, date2, freq=freq).to_json(date_format='epoch',
orient='records')
except:
query = ''
else:
try:
query = inter_vs_extern_workspaces(date1, freq=freq).to_json(date_format='epoch',
orient='records')
except:
query = ''
print(query)
resp.body = query
class WorkSpaceActivityResource:
def on_get(self, req, resp):
date1 = req.get_param('date1')
freq = req.get_param('freq')
if req.get_param('date2') is not None:
date2 = req.get_param('date2')
try:
query = workspace_activity_in_time(date1, date2, freq=freq).reset_index().to_json(date_format='epoch',
orient='records')
except:
query = ''
else:
try:
query = workspace_activity_in_time(date1, freq=freq).reset_index().to_json(date_format='epoch',
orient='records')
except:
query = ''
resp.body = query
class WorkspaceResource:
def on_get(self, req, resp):
date1 = req.get_param('date1')
if req.get_param('date2') is not None:
date2 = req.get_param('date2')
try:
query = get_top_ten_workspaces(date1, date2).to_json()
except:
query = ''
else:
try:
query = get_top_ten_workspaces(date1).to_json()
except:
query = ''
resp.body = query
class UsersResource:
def on_get(self, req, resp):
date1 = req.get_param('date1')
if req.get_param('date2') is not None:
date2 = req.get_param('date2')
try:
query = get_top_ten_users(date1, date2).to_json()
except:
query = ''
else:
try:
query = get_top_ten_users(date1).to_json()
except:
query = ''
resp.body = query
api = falcon.API(middleware=[cors.middleware])
api.add_route('/activity', UserActivity())
api.add_route('/workspaces', WorkspaceResource())
api.add_route('/activity_workspaces', WorkSpaceActivityResource())
api.add_route('/users', UsersResource())
api.add_route('/intern_extern_users', ExternalInternalUsers())
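def _example_client(base_url='http://127.0.0.1'):
    # Illustrative sketch (not part of the original file): query the routes
    # registered above from a separate process while `serve` below is
    # running. Assumes the `requests` package is installed; the date values
    # and frequency are placeholders.
    import requests
    params = {'date1': '2016-01', 'date2': '2016-12', 'freq': 'W'}
    for route in ('/activity', '/workspaces', '/users', '/intern_extern_users'):
        resp = requests.get(base_url + route, params=params)
        print(route, resp.status_code, resp.text[:200])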
serve(api, host='127.0.0.1', port=80)
| mit |
lenovor/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
jmschrei/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
google-code-export/stoqs | stoqs/views/permalinks.py | 4 | 3457 | __author__ = 'Chander Ganesan'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'chander at otg-nc.com'
__doc__ = '''
A set of views designed to generate a permalink based on a set of STOQS query
parameters.
Note that work should be done at some point to prevent this view from being
misused, by validating the parameters/values passed in; I didn't do this
since I'm not 100% sure of all the use cases for STOQS. However, the danger
right now is that anyone could use this view to store arbitrary json data
in the database - and abuse the services of the provider hosting STOQS (and
even attempt nasty things like javascript injection - though such things
won't impact STOQS web services, which only load the json, not run it.) Enabling
CSRF protection and account login as well would be great ideas and would
greatly mitigate the danger here.
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
from django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation
from django.http import Http404
from django.http import HttpResponse
from stoqs.views import BaseOutputer
from stoqs import models
##import matplotlib.pyplot as plt
import logging
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
import simplejson as json
from django.core.urlresolvers import reverse
import threading
_thread_local_vars = threading.local()
logger=logging.getLogger(__name__)
@csrf_exempt
def generate_permalink(request):
data=request.POST.get('parameters')
if data:
try:
# Just make sure it is valid json before storing it.
parameters=json.loads(data)
m=models.PermaLink(parameters=data)
m.save()
logger.debug('Saved link with id of %s', m.pk)
url="%s?permalink_id=%s" % (reverse('stoqs-query-ui',
kwargs={'dbAlias' :
request.META['dbAlias']}),
m.pk)
# url=reverse('redirect_permalink',
# kwargs={'dbAlias' : (request.META['dbAlias']),
# 'id': m.pk})
        except Exception as e:
logger.exception('Doh!')
logger.debug('Attempt to create permalink without valid data')
raise SuspiciousOperation('Attempt to create permalink without any data, or with invalid data')
else:
# In the case where they request a permalink, but without selecting
# any parameters, we'll just return to them the current URL for the
# tool, so we don't store unnecessary permalinks
url=reverse('stoqs-query-ui',
kwargs={'dbAlias' : request.META['dbAlias']})
return HttpResponse(request.build_absolute_uri(url))
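def _example_generate_permalink(db_alias='default'):
    # Illustrative sketch, not part of the original views: exercise
    # generate_permalink above with Django's test client. The URL path and
    # the parameter payload are assumptions that depend on the project's
    # URLconf and on the UI's parameter format.
    from django.test import Client
    client = Client()
    payload = json.dumps({'plotgroup': ['temperature']})
    resp = client.post('/%s/generate_permalink/' % db_alias,
                       {'parameters': payload})
    print('%s %s' % (resp.status_code, resp.content))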
def load_permalink(request, id):
logger.debug('Got request for link with ID of %s', id)
try:
m=models.PermaLink.objects.get(pk=id)
m.usage_count = m.usage_count + 1
m.save()
# return the JSON for the permalink data
response=HttpResponse(m.parameters,
content_type="application/json")
return response
    except ObjectDoesNotExist as e:
logger.debug('Attempted to get a permalink that does not exist: %s', id)
        raise Http404
| gpl-3.0 |
PeterRochford/SkillMetrics | skill_metrics/plot_pattern_diagram_markers.py | 1 | 4916 | import matplotlib.pyplot as plt
import matplotlib.colors as clr
import matplotlib
import warnings
from skill_metrics import add_legend
def plot_pattern_diagram_markers(X,Y,option):
'''
Plots color markers on a pattern diagram.
Plots color markers on a target diagram according their (X,Y)
locations. The symbols and colors are chosen automatically with a
limit of 70 symbol & color combinations.
The color bar is titled using the content of option['titleColorBar']
(if non-empty string).
INPUTS:
    X : x-coordinates of markers
    Y : y-coordinates of markers
option : dictionary containing option values. (Refer to
GET_TARGET_DIAGRAM_OPTIONS function for more information.)
option['axismax'] : maximum for the X & Y values. Used to limit
maximum distance from origin to display markers
option['markerlabel'] : labels for markers
OUTPUTS:
None
Created on Nov 30, 2016
Revised on Jan 6, 2019
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
[email protected]
'''
# Set face color transparency
alpha = option['alpha']
# Set font and marker size
fontSize = matplotlib.rcParams.get('font.size') - 2
markerSize = option['markersize']
if option['markerlegend'] == 'on':
# Check that marker labels have been provided
if option['markerlabel'] == '':
raise ValueError('No marker labels provided.')
# Plot markers of different color and shapes with labels
# displayed in a legend
# Define markers
kind = ['+','o','x','s','d','^','v','p','h','*']
colorm = ['b','r','g','c','m','y','k']
if len(X) > 70:
_disp('You must introduce new markers to plot more than 70 cases.')
            _disp("The 'marker' character array needs to be extended inside the code.")
if len(X) <= len(kind):
# Define markers with specified color
marker = []
markercolor = []
for color in colorm:
for symbol in kind:
marker.append(symbol + option['markercolor'])
rgba = clr.to_rgb(option['markercolor']) + (alpha,)
markercolor.append(rgba)
else:
# Define markers and colors using predefined list
marker = []
markercolor = [] #Bug Fix: missing array initialization
for color in colorm:
for symbol in kind:
marker.append(symbol + color)
rgba = clr.to_rgb(color) + (alpha,)
markercolor.append(rgba)
# Plot markers at data points
limit = option['axismax']
hp = ()
markerlabel = []
for i, xval in enumerate(X):
if abs(X[i]) <= limit and abs(Y[i]) <= limit:
h = plt.plot(X[i],Y[i],marker[i], markersize = markerSize,
markerfacecolor = markercolor[i],
markeredgecolor = marker[i][1],
markeredgewidth = 2)
hp += tuple(h)
markerlabel.append(option['markerlabel'][i])
# Add legend
if len(markerlabel) == 0:
warnings.warn('No markers within axis limit ranges.')
else:
add_legend(markerlabel, option, rgba, markerSize, fontSize, hp)
else:
# Plot markers as dots of a single color with accompanying labels
# and no legend
# Plot markers at data points
limit = option['axismax']
rgba = clr.to_rgb(option['markercolor']) + (alpha,)
for i,xval in enumerate(X):
if abs(X[i]) <= limit and abs(Y[i]) <= limit:
# Plot marker
marker = option['markersymbol']
plt.plot(X[i],Y[i],marker, markersize = markerSize,
markerfacecolor = rgba,
markeredgecolor = option['markercolor'])
# Check if marker labels provided
if type(option['markerlabel']) is list:
# Label marker
xtextpos = X[i]
ytextpos = Y[i]
plt.text(xtextpos,ytextpos,option['markerlabel'][i],
color = option['markerlabelcolor'],
verticalalignment = 'bottom',
horizontalalignment = 'right',
fontsize = fontSize)
# Add legend if labels provided as dictionary
markerlabel = option['markerlabel']
if type(markerlabel) is dict:
add_legend(markerlabel, option, rgba, markerSize, fontSize)
def _disp(text):
print(text)
| gpl-3.0 |
fyffyt/scikit-learn | sklearn/neighbors/approximate.py | 71 | 22357 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
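def _example_prefix_match():
    # Illustrative sketch, not part of the original module: how the two
    # searchsorted calls above find every stored hash that shares a query's
    # most significant bits. Here the hashes are 8-bit and the top 3 bits are
    # matched; the real code uses 32-bit hashes and precomputed mask arrays.
    tree = np.sort(np.array([0b00010110, 0b01011001,
                             0b01100011, 0b11100001], dtype=np.uint8))
    bin_X = np.array([0b01100000], dtype=np.uint8)
    left_mask = np.uint8(0b11100000)   # keep the top 3 bits
    right_mask = np.uint8(0b00011111)  # set the remaining bits to one
    left, right = _find_matching_indices(tree, bin_X, left_mask, right_mask)
    print(tree[left[0]:right[0]])      # [99], i.e. 0b01100011 shares prefix 011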
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
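# Usage sketch (editor's illustration, mirroring how LSHForest.fit builds each
# tree below): hash an (n_samples, n_features) array to one 32-bit fingerprint
# per sample.
#   hasher = GaussianRandomProjectionHash(n_components=32, random_state=0)
#   hashes = hasher.fit_transform(X)[:, 0]   # dtype '>u4', shape (n_samples,)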
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
    LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
        Lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
    n_candidates : int (default = 50)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
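    # Editor's note: after _generate_masks, self._left_mask[h] is a 32-bit
    # mask with the top h bits set (it keeps a hash's h-bit prefix) and
    # self._right_mask[h] has the remaining low bits set (it fills the
    # suffix), for h in 0..MAX_HASH_SIZE.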
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
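    # Editor's note: the ascending phase above relaxes the required prefix
    # length one bit at a time (max_depth -= 1), pooling candidates from all
    # trees until at least self.n_candidates * self.n_estimators candidates
    # (and n_neighbors unique ones) are found or min_hash_match is reached.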
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # `n_components` = hash size, `n_features` = data dimensionality.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
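# Usage sketch (editor's illustration, not part of the library): incremental
# indexing followed by a query.
#   lshf = LSHForest(random_state=42).fit(X_initial)
#   lshf.partial_fit(X_new)               # appends X_new to the fitted index
#   distances, indices = lshf.kneighbors(X_query, n_neighbors=3)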
| bsd-3-clause |
lbishal/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares objective. The penalty `shrinks` the
values of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, compared to that
of standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
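# Editor's note: the ridge estimator above minimizes
# ||y - Xw||^2 + alpha * ||w||^2 with alpha = 0.1; ordinary least squares is
# the alpha = 0 special case, which is why its fitted slope varies more under
# noisy resampling.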
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
ImageMarkup/isbi-challenge-scoring | isic_challenge_scoring/task2.py | 1 | 1450 | # -*- coding: utf-8 -*-
import pathlib
from typing import Dict, List
import pandas as pd
from isic_challenge_scoring import metrics
from isic_challenge_scoring.confusion import createBinaryConfusionMatrix, normalizeConfusionMatrix
from isic_challenge_scoring.scoreCommon import iterImagePairs
def score(truthPath: pathlib.Path, predictionPath: pathlib.Path) -> List[Dict]:
confusionMatrics = pd.DataFrame([
createBinaryConfusionMatrix(
truthBinaryValues=truthImage > 128,
predictionBinaryValues=predictionImage > 128,
name=truthFileId
)
# TODO: truthFileId needs to include attribute
for truthImage, predictionImage, truthFileId in
iterImagePairs(truthPath, predictionPath)
])
# Normalize all values, since image sizes vary
normalizedConfusionMatrics = confusionMatrics.apply(
normalizeConfusionMatrix,
axis='columns'
)
sumConfusionMatrix = normalizedConfusionMatrics.sum(axis='index')
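    # Editor's note: the metrics helpers are assumed to implement the usual
    # definitions on the summed confusion matrix:
    # jaccard = TP / (TP + FP + FN) and dice = 2 * TP / (2 * TP + FP + FN).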
# TODO: per-attribute metrics
return [
{
'dataset': 'micro_average',
'metrics': [
{
'name': 'jaccard',
'value': metrics.binaryJaccard(sumConfusionMatrix)
},
{
'name': 'dice',
'value': metrics.binaryDice(sumConfusionMatrix)
},
]
}
]
| apache-2.0 |