repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
sofiane87/lasagne-GAN | dcgan/dcgan_celeba.py | 1 | 8790 | from __future__ import print_function
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, concatenate
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD
import matplotlib.pyplot as plt
import sys
import os
import numpy as np
class DCGAN():
def __init__(self):
self.img_rows = 64
self.img_cols = 64
self.channels = 3
self.save_img_folder = 'dcgan/images/'
optimizer = Adam(0.0002, 0.5)
optimizer_dis = Adam(0.0002, 0.5)
self.latent_dim = 200
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer_dis,
metrics=['accuracy'])
# Build and compile the generator
self.generator = self.build_generator()
self.generator.compile(loss='binary_crossentropy', optimizer=optimizer)
# The generator takes noise as input and generates imgs
z = Input(shape=(200,))
img = self.generator(z)
# For the combined model we will only train the generator
self.discriminator.trainable = False
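# (Keras captures the trainable flag at compile time: the discriminator compiled
# above still updates when trained directly, and is frozen only inside the
# combined model compiled below.)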
# The discriminator takes generated images as input and determines validity
valid = self.discriminator(img)
# The combined model (stacked generator and discriminator) takes
# noise as input => generates images => determines validity
self.combined = Model(z, valid)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def build_generator(self):
noise_shape = (200,)
model = Sequential()
model.add(Dense(512 * 4 * 4, activation="relu", input_shape=noise_shape))
model.add(Reshape((4, 4, 512)))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(256, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(32, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(3, kernel_size=3, padding="same"))
model.add(Activation("tanh"))
model.summary()
noise = Input(shape=noise_shape)
img = model(noise)
return Model(noise, img)
def build_discriminator(self):
img_shape = (self.img_rows, self.img_cols, self.channels)
model = Sequential()
model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.summary()
img = Input(shape=img_shape)
validity = model(img)
return Model(img, validity)
def train(self, epochs, batch_size=128, save_interval=50):
# Load the dataset
X_train = self.load_data()
half_batch = int(batch_size / 2)
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random half batch of images
idx = np.random.randint(0, X_train.shape[0], half_batch)
imgs = X_train[idx]
# Sample noise and generate a half batch of new images
noise = np.random.normal(0, 1, (half_batch, 200))
gen_imgs = self.generator.predict(noise)
# Train the discriminator (real classified as ones and generated as zeros)
d_loss_real = self.discriminator.train_on_batch(imgs, np.ones((half_batch, 1)))
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1)))
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
noise = np.random.normal(0, 1, (batch_size, 200))
# Train the generator (wants discriminator to mistake images as real)
g_loss = self.combined.train_on_batch(noise, np.ones((batch_size, 1)))
# Plot the progress
print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
# If at save interval => save generated image samples
if epoch % save_interval == 0:
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
self.save_imgs(epoch,imgs)
def save_imgs(self, epoch,imgs):
if not(os.path.exists(self.save_img_folder)):
os.makedirs(self.save_img_folder)
r, c = 5, 5
z = np.random.normal(size=(25, self.latent_dim))
gen_imgs = self.generator.predict(z)
gen_imgs = 0.5 * gen_imgs + 0.5
# z_imgs = self.encoder.predict(imgs)
# gen_enc_imgs = self.generator.predict(z_imgs)
# gen_enc_imgs = 0.5 * gen_enc_imgs + 0.5
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
self.plot(axs[i,j],gen_imgs[cnt, :,:,:].squeeze())
cnt += 1
print('----- Saving generated -----')
if isinstance(epoch, str):
fig.savefig(self.save_img_folder + "celeba_{}.png".format(epoch))
else:
fig.savefig(self.save_img_folder + "celeba_%d.png" % epoch)
plt.close()
# fig, axs = plt.subplots(r, c)
# cnt = 0
# for i in range(r):
# for j in range(c):
# self.plot(axs[i,j],gen_enc_imgs[cnt, :,:,:].squeeze())
# cnt += 1
# print('----- Saving encoded -----')
# if isinstance(epoch, str):
# fig.savefig(self.save_img_folder + "celeba_{}_enc.png".format(epoch))
# else :
# fig.savefig(self.save_img_folder + "celeba_%d_enc.png" % epoch)
# plt.close()
fig, axs = plt.subplots(r, c)
cnt = 0
imgs = imgs * 0.5 + 0.5
for i in range(r):
for j in range(c):
self.plot(axs[i,j],imgs[cnt, :,:,:].squeeze())
cnt += 1
print('----- Saving real -----')
if isinstance(epoch, str):
fig.savefig(self.save_img_folder + "celeba_{}_real.png".format(epoch))
else :
fig.savefig(self.save_img_folder + "celeba_%d_real.png" % epoch)
plt.close()
def load_data(self):
self.dataPath = r'D:\Code\data\sceleba.npy'  # raw string so the backslashes are not read as escapes
print('----- Loading CelebA -------')
X_train = np.load(self.dataPath)
X_train = X_train.transpose([0,2,3,1])
X_train = (X_train.astype(np.float32) - 0.5) / 0.5
print('CelebA shape:', X_train.shape, X_train.min(), X_train.max())
print('------- CelebA loaded -------')
return X_train
def plot(self, fig, img):
if self.channels == 1:
fig.imshow(img, cmap='gray')  # self.cmap was never defined; assume a grayscale colormap
fig.axis('off')
else:
fig.imshow(img)
fig.axis('off')
if __name__ == '__main__':
dcgan = DCGAN()
dcgan.train(epochs=50001, batch_size=64, save_interval=100)
| mit |
johankaito/fufuka | graph-tool/doc/sphinxext/docscrape_sphinx.py | 4 | 7799 | import re, inspect, textwrap, pydoc
import sphinx
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
import collections
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| apache-2.0 |
stevenzhang18/Indeed-Flask | lib/pandas/io/tests/test_gbq.py | 9 | 23045 | from datetime import datetime
import nose
import pytz
import platform
from time import sleep
import numpy as np
from distutils.version import StrictVersion
from pandas import compat
from pandas import NaT
from pandas.compat import u, range
from pandas.core.frame import DataFrame
import pandas.io.gbq as gbq
import pandas.util.testing as tm
PROJECT_ID = None
DATASET_ID = 'pydata_pandas_bq_testing'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
_IMPORTS = False
_GOOGLE_API_CLIENT_INSTALLED = False
_GOOGLE_API_CLIENT_VALID_VERSION = False
_HTTPLIB2_INSTALLED = False
_SETUPTOOLS_INSTALLED = False
def _test_imports():
global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \
_HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED
try:
import pkg_resources
_SETUPTOOLS_INSTALLED = True
except ImportError:
_SETUPTOOLS_INSTALLED = False
if compat.PY3:
google_api_minimum_version = '1.4.1'
else:
google_api_minimum_version = '1.2.0'
if _SETUPTOOLS_INSTALLED:
try:
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import AccessTokenRefreshError
from oauth2client.file import Storage
from oauth2client.tools import run_flow
_GOOGLE_API_CLIENT_INSTALLED=True
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version
if StrictVersion(_GOOGLE_API_CLIENT_VERSION) >= StrictVersion(google_api_minimum_version):
_GOOGLE_API_CLIENT_VALID_VERSION = True
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
try:
import httplib2
_HTTPLIB2_INSTALLED = True
except ImportError:
_HTTPLIB2_INSTALLED = False
if not _SETUPTOOLS_INSTALLED:
raise ImportError('Could not import pkg_resources (setuptools).')
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('Could not import Google API Client.')
if not _GOOGLE_API_CLIENT_VALID_VERSION:
raise ImportError("pandas requires google-api-python-client >= {0} for Google BigQuery support, "
"current version {1}".format(google_api_minimum_version, _GOOGLE_API_CLIENT_VERSION))
if not _HTTPLIB2_INSTALLED:
raise ImportError("pandas requires httplib2 for Google BigQuery support")
def test_requirements():
try:
_test_imports()
except (ImportError, NotImplementedError) as import_exception:
raise nose.SkipTest(import_exception)
def clean_gbq_environment():
dataset = gbq._Dataset(PROJECT_ID)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = gbq._Table(PROJECT_ID, dataset_id)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona')) for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
def test_generate_bq_schema_deprecated():
# 11121 Deprecation of generate_bq_schema
with tm.assert_produces_warning(FutureWarning):
df = make_mixed_dataframe_v2(10)
gbq.generate_bq_schema(df)
class TestGBQConnectorIntegration(tm.TestCase):
def setUp(self):
test_requirements()
if not PROJECT_ID:
raise nose.SkipTest("Cannot run integration tests without a project id")
self.sut = gbq.GbqConnector(PROJECT_ID)
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None, 'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
credentials = self.sut.get_credentials()
bigquery_service = self.sut.get_service(credentials)
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class TestReadGBQUnitTests(tm.TestCase):
def setUp(self):
test_requirements()
def test_should_return_bigquery_integers_as_python_floats(self):
result = gbq._parse_entry(1, 'INTEGER')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_floats_as_python_floats(self):
result = gbq._parse_entry(1, 'FLOAT')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_timestamps_as_numpy_datetime(self):
result = gbq._parse_entry('0e9', 'TIMESTAMP')
tm.assert_equal(result, np.datetime64('1970-01-01T00:00:00Z'))
def test_should_return_bigquery_booleans_as_python_booleans(self):
result = gbq._parse_entry('false', 'BOOLEAN')
tm.assert_equal(result, False)
def test_should_return_bigquery_strings_as_python_strings(self):
result = gbq._parse_entry('STRING', 'STRING')
tm.assert_equal(result, 'STRING')
def test_to_gbq_should_fail_if_invalid_table_name_passed(self):
with tm.assertRaises(gbq.NotFoundException):
gbq.to_gbq(DataFrame(), 'invalid_table_name', project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(self):
with tm.assertRaises(TypeError):
gbq.to_gbq(DataFrame(), 'dataset.tablename')
def test_read_gbq_with_no_project_id_given_should_fail(self):
with tm.assertRaises(TypeError):
gbq.read_gbq('SELECT "1" as NUMBER_1')
def test_that_parse_data_works_properly(self):
test_schema = {'fields': [{'mode': 'NULLABLE', 'name': 'VALID_STRING', 'type': 'STRING'}]}
test_page = [{'f': [{'v': 'PI'}]}]
test_output = gbq._parse_data(test_schema, test_page)
correct_output = DataFrame({'VALID_STRING': ['PI']})
tm.assert_frame_equal(test_output, correct_output)
class TestReadGBQIntegration(tm.TestCase):
@classmethod
def setUpClass(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE* executing *ALL* tests
# described below.
if not PROJECT_ID:
raise nose.SkipTest("Cannot run integration tests without a project id")
test_requirements()
def setUp(self):
# - PER-TEST FIXTURES -
# put here any instruction you want to be run *BEFORE* *EVERY* test is executed.
pass
@classmethod
def tearDownClass(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER* executing all tests.
pass
def tearDown(self):
# - PER-TEST FIXTURES -
# put here any instructions you want to be run *AFTER* *EVERY* test is executed.
pass
def test_should_properly_handle_valid_strings(self):
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_properly_handle_empty_strings(self):
query = 'SELECT "" as EMPTY_STRING'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'EMPTY_STRING': [""]}))
def test_should_properly_handle_null_strings(self):
query = 'SELECT STRING(NULL) as NULL_STRING'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'NULL_STRING': [None]}))
def test_should_properly_handle_valid_integers(self):
query = 'SELECT INTEGER(3) as VALID_INTEGER'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'VALID_INTEGER': [3]}))
def test_should_properly_handle_null_integers(self):
query = 'SELECT INTEGER(NULL) as NULL_INTEGER'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'NULL_INTEGER': [np.nan]}))
def test_should_properly_handle_valid_floats(self):
query = 'SELECT PI() as VALID_FLOAT'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'VALID_FLOAT': [3.141592653589793]}))
def test_should_properly_handle_null_floats(self):
query = 'SELECT FLOAT(NULL) as NULL_FLOAT'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'NULL_FLOAT': [np.nan]}))
def test_should_properly_handle_timestamp_unix_epoch(self):
query = 'SELECT TIMESTAMP("1970-01-01 00:00:00") as UNIX_EPOCH'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'UNIX_EPOCH': [np.datetime64('1970-01-01T00:00:00.000000Z')]}))
def test_should_properly_handle_arbitrary_timestamp(self):
query = 'SELECT TIMESTAMP("2004-09-15 05:00:00") as VALID_TIMESTAMP'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'VALID_TIMESTAMP': [np.datetime64('2004-09-15T05:00:00.000000Z')]}))
def test_should_properly_handle_null_timestamp(self):
query = 'SELECT TIMESTAMP(NULL) as NULL_TIMESTAMP'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'NULL_TIMESTAMP': [NaT]}))
def test_should_properly_handle_true_boolean(self):
query = 'SELECT BOOLEAN(TRUE) as TRUE_BOOLEAN'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'TRUE_BOOLEAN': [True]}))
def test_should_properly_handle_false_boolean(self):
query = 'SELECT BOOLEAN(FALSE) as FALSE_BOOLEAN'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'FALSE_BOOLEAN': [False]}))
def test_should_properly_handle_null_boolean(self):
query = 'SELECT BOOLEAN(NULL) as NULL_BOOLEAN'
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, DataFrame({'NULL_BOOLEAN': [None]}))
def test_unicode_string_conversion_and_normalization(self):
correct_test_datatype = DataFrame(
{'UNICODE_STRING': [u("\xe9\xfc")]}
)
unicode_string = "\xc3\xa9\xc3\xbc"
if compat.PY3:
unicode_string = unicode_string.encode('latin-1').decode('utf8')
query = 'SELECT "{0}" as UNICODE_STRING'.format(unicode_string)
df = gbq.read_gbq(query, project_id=PROJECT_ID)
tm.assert_frame_equal(df, correct_test_datatype)
def test_index_column(self):
query = "SELECT 'a' as STRING_1, 'b' as STRING_2"
result_frame = gbq.read_gbq(query, project_id=PROJECT_ID, index_col="STRING_1")
correct_frame = DataFrame({'STRING_1': ['a'], 'STRING_2': ['b']}).set_index("STRING_1")
tm.assert_equal(result_frame.index.name, correct_frame.index.name)
def test_column_order(self):
query = "SELECT 'a' as STRING_1, 'b' as STRING_2, 'c' as STRING_3"
col_order = ['STRING_3', 'STRING_1', 'STRING_2']
result_frame = gbq.read_gbq(query, project_id=PROJECT_ID, col_order=col_order)
correct_frame = DataFrame({'STRING_1': ['a'], 'STRING_2': ['b'], 'STRING_3': ['c']})[col_order]
tm.assert_frame_equal(result_frame, correct_frame)
def test_column_order_plus_index(self):
query = "SELECT 'a' as STRING_1, 'b' as STRING_2, 'c' as STRING_3"
col_order = ['STRING_3', 'STRING_2']
result_frame = gbq.read_gbq(query, project_id=PROJECT_ID, index_col='STRING_1', col_order=col_order)
correct_frame = DataFrame({'STRING_1': ['a'], 'STRING_2': ['b'], 'STRING_3': ['c']})
correct_frame.set_index('STRING_1', inplace=True)
correct_frame = correct_frame[col_order]
tm.assert_frame_equal(result_frame, correct_frame)
def test_malformed_query(self):
with tm.assertRaises(gbq.GenericGBQException):
gbq.read_gbq("SELCET * FORM [publicdata:samples.shakespeare]", project_id=PROJECT_ID)
def test_bad_project_id(self):
with tm.assertRaises(gbq.GenericGBQException):
gbq.read_gbq("SELECT 1", project_id='001')
def test_bad_table_name(self):
with tm.assertRaises(gbq.GenericGBQException):
gbq.read_gbq("SELECT * FROM [publicdata:samples.nope]", project_id=PROJECT_ID)
def test_download_dataset_larger_than_200k_rows(self):
test_size = 200005
# Test for known BigQuery bug in datasets larger than 100k rows
# http://stackoverflow.com/questions/19145587/bq-py-not-paging-results
df = gbq.read_gbq("SELECT id FROM [publicdata:samples.wikipedia] GROUP EACH BY id ORDER BY id ASC LIMIT {0}".format(test_size),
project_id=PROJECT_ID)
self.assertEqual(len(df.drop_duplicates()), test_size)
def test_zero_rows(self):
# Bug fix for https://github.com/pydata/pandas/issues/10273
df = gbq.read_gbq("SELECT title, language FROM [publicdata:samples.wikipedia] where timestamp=-9999999",
project_id=PROJECT_ID)
expected_result = DataFrame(columns=['title', 'language'])
self.assert_frame_equal(df, expected_result)
class TestToGBQIntegration(tm.TestCase):
# Changes to BigQuery table schema may take up to 2 minutes as of May 2015
# As a workaround to this issue, each test should use a unique table name.
# Make sure to modify the for loop range in the tearDownClass when a new test is added
# See `Issue 191 <https://code.google.com/p/google-bigquery/issues/detail?id=191>`__
@classmethod
def setUpClass(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE* executing *ALL* tests
# described below.
if not PROJECT_ID:
raise nose.SkipTest("Cannot run integration tests without a project id")
test_requirements()
clean_gbq_environment()
gbq._Dataset(PROJECT_ID).create(DATASET_ID + "1")
def setUp(self):
# - PER-TEST FIXTURES -
# put here any instruction you want to be run *BEFORE* *EVERY* test is executed.
self.dataset = gbq._Dataset(PROJECT_ID)
self.table = gbq._Table(PROJECT_ID, DATASET_ID + "1")
@classmethod
def tearDownClass(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER* executing all tests.
clean_gbq_environment()
def tearDown(self):
# - PER-TEST FIXTURES -
# put here any instructions you want to be run *AFTER* *EVERY* test is executed.
pass
def test_upload_data(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 1000001
df = make_mixed_dataframe_v2(test_size)
gbq.to_gbq(df, destination_table, PROJECT_ID, chunksize=10000)
sleep(60) # <- Curses Google!!!
result = gbq.read_gbq("SELECT COUNT(*) as NUM_ROWS FROM {0}".format(destination_table),
project_id=PROJECT_ID)
self.assertEqual(result['NUM_ROWS'][0], test_size)
def test_upload_data_if_table_exists_fail(self):
destination_table = DESTINATION_TABLE + "2"
test_size = 10
df = make_mixed_dataframe_v2(test_size)
self.table.create(TABLE_ID + "2", gbq._generate_bq_schema(df))
# Test the default value of if_exists is 'fail'
with tm.assertRaises(gbq.TableCreationError):
gbq.to_gbq(df, destination_table, PROJECT_ID)
# Test the if_exists parameter with value 'fail'
with tm.assertRaises(gbq.TableCreationError):
gbq.to_gbq(df, destination_table, PROJECT_ID, if_exists='fail')
def test_upload_data_if_table_exists_append(self):
destination_table = DESTINATION_TABLE + "3"
test_size = 10
df = make_mixed_dataframe_v2(test_size)
df_different_schema = tm.makeMixedDataFrame()
# Initialize table with sample data
gbq.to_gbq(df, destination_table, PROJECT_ID, chunksize=10000)
# Test the if_exists parameter with value 'append'
gbq.to_gbq(df, destination_table, PROJECT_ID, if_exists='append')
sleep(60) # <- Curses Google!!!
result = gbq.read_gbq("SELECT COUNT(*) as NUM_ROWS FROM {0}".format(destination_table), project_id=PROJECT_ID)
self.assertEqual(result['NUM_ROWS'][0], test_size * 2)
# Try inserting with a different schema, confirm failure
with tm.assertRaises(gbq.InvalidSchema):
gbq.to_gbq(df_different_schema, destination_table, PROJECT_ID, if_exists='append')
def test_upload_data_if_table_exists_replace(self):
destination_table = DESTINATION_TABLE + "4"
test_size = 10
df = make_mixed_dataframe_v2(test_size)
df_different_schema = tm.makeMixedDataFrame()
# Initialize table with sample data
gbq.to_gbq(df, destination_table, PROJECT_ID, chunksize=10000)
# Test the if_exists parameter with the value 'replace'.
gbq.to_gbq(df_different_schema, destination_table, PROJECT_ID, if_exists='replace')
sleep(60) # <- Curses Google!!!
result = gbq.read_gbq("SELECT COUNT(*) as NUM_ROWS FROM {0}".format(destination_table), project_id=PROJECT_ID)
self.assertEqual(result['NUM_ROWS'][0], 5)
def test_google_upload_errors_should_raise_exception(self):
destination_table = DESTINATION_TABLE + "5"
test_timestamp = datetime.now(pytz.timezone('US/Arizona'))
bad_df = DataFrame({'bools': [False, False], 'flts': [0.0, 1.0], 'ints': [0, '1'], 'strs': ['a', 1],
'times': [test_timestamp, test_timestamp]}, index=range(2))
with tm.assertRaises(gbq.StreamingInsertError):
gbq.to_gbq(bad_df, destination_table, PROJECT_ID, verbose=True)
def test_generate_schema(self):
df = tm.makeMixedDataFrame()
schema = gbq._generate_bq_schema(df)
test_schema = {'fields': [{'name': 'A', 'type': 'FLOAT'},
{'name': 'B', 'type': 'FLOAT'},
{'name': 'C', 'type': 'STRING'},
{'name': 'D', 'type': 'TIMESTAMP'}]}
self.assertEqual(schema, test_schema)
def test_create_table(self):
destination_table = TABLE_ID + "6"
test_schema = {'fields': [{'name': 'A', 'type': 'FLOAT'}, {'name': 'B', 'type': 'FLOAT'},
{'name': 'C', 'type': 'STRING'}, {'name': 'D', 'type': 'TIMESTAMP'}]}
self.table.create(destination_table, test_schema)
self.assertTrue(self.table.exists(destination_table), 'Expected table to exist')
def test_table_does_not_exist(self):
self.assertTrue(not self.table.exists(TABLE_ID + "7"), 'Expected table not to exist')
def test_delete_table(self):
destination_table = TABLE_ID + "8"
test_schema = {'fields': [{'name': 'A', 'type': 'FLOAT'}, {'name': 'B', 'type': 'FLOAT'},
{'name': 'C', 'type': 'STRING'}, {'name': 'D', 'type': 'TIMESTAMP'}]}
self.table.create(destination_table, test_schema)
self.table.delete(destination_table)
self.assertTrue(not self.table.exists(destination_table), 'Expected table not to exist')
def test_list_table(self):
destination_table = TABLE_ID + "9"
test_schema = {'fields': [{'name': 'A', 'type': 'FLOAT'}, {'name': 'B', 'type': 'FLOAT'},
{'name': 'C', 'type': 'STRING'}, {'name': 'D', 'type': 'TIMESTAMP'}]}
self.table.create(destination_table, test_schema)
self.assertTrue(destination_table in self.dataset.tables(DATASET_ID + "1"),
'Expected table list to contain table {0}'.format(destination_table))
def test_list_dataset(self):
dataset_id = DATASET_ID + "1"
self.assertTrue(dataset_id in self.dataset.datasets(),
'Expected dataset list to contain dataset {0}'.format(dataset_id))
def test_list_table_zero_results(self):
dataset_id = DATASET_ID + "2"
self.dataset.create(dataset_id)
table_list = gbq._Dataset(PROJECT_ID).tables(dataset_id)
self.assertEqual(len(table_list), 0, 'Expected gbq.list_table() to return 0')
def test_create_dataset(self):
dataset_id = DATASET_ID + "3"
self.dataset.create(dataset_id)
self.assertTrue(dataset_id in self.dataset.datasets(), 'Expected dataset to exist')
def test_delete_dataset(self):
dataset_id = DATASET_ID + "4"
self.dataset.create(dataset_id)
self.dataset.delete(dataset_id)
self.assertTrue(dataset_id not in self.dataset.datasets(), 'Expected dataset not to exist')
def test_dataset_exists(self):
dataset_id = DATASET_ID + "5"
self.dataset.create(dataset_id)
self.assertTrue(self.dataset.exists(dataset_id), 'Expected dataset to exist')
def create_table_data_dataset_does_not_exist(self):
dataset_id = DATASET_ID + "6"
table_id = TABLE_ID + "1"
table_with_new_dataset = gbq._Table(PROJECT_ID, dataset_id)
df = make_mixed_dataframe_v2(10)
table_with_new_dataset.create(table_id, gbq._generate_bq_schema(df))
self.assertTrue(self.dataset.exists(dataset_id), 'Expected dataset to exist')
self.assertTrue(table_with_new_dataset.exists(table_id), 'Expected table to exist')
def test_dataset_does_not_exist(self):
self.assertTrue(not self.dataset.exists(DATASET_ID + "_not_found"), 'Expected dataset not to exist')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
sjuvekar/Kaggle-Expedia-Raking | PythonBenchmark/feature_extractor.py | 1 | 2190 | import argparse
import pandas
import data_io
import multiprocessing
from features import *
G_PROCESS_POOL = multiprocessing.Pool()  # note: created but unused below; features are extracted serially via map()
class FeatureExtractor:
def __init__(self, X):
self.X = X
print "Data Size = ", self.X.shape
def transformer(self, fet):
return fet.features()
def feature_extractor(self):
feature_list = [
site_id_features.SiteIdFeatures(self.X),
visitor_location_id_features.VisitorLocationIdFeatures(self.X),
visitor_starrating_features.VisitorStarratingFeatures(self.X),
visitor_adr_usd_features.VisitorAdrUsdFeatures(self.X),
prop_country_id_features.PropCountryIdFeatures(self.X),
prop_id_features.PropIdFeatures(self.X),
prop_starrating_features.PropStarratingFeatures(self.X),
prop_review_features.PropReviewFeatures(self.X),
prop_location_score1_features.PropLocationScore1Features(self.X),
prop_location_score2_features.PropLocationScore2Features(self.X),
prop_log_historical_price_features.PropLogHistoricalPriceFeatures(self.X),
price_usd_features.PriceUsdFeatures(self.X),
srch_destination_id_features.SrchDestinationIdFeatures(self.X),
srch_length_of_stay_features.SrchLengthOfStayFeatures(self.X),
srch_booking_window_features.SrchBookingWindowFeatures(self.X),
]
return map(self.transformer, feature_list)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate features using train/test data")
parser.add_argument("--test", action="store_true", default=False, help="Weather to use test data", required=False)
result = parser.parse_args()
if result.test:
print("Reading test data")
data = data_io.read_test()
else:
print("Reading training data")
data = data_io.read_train()
fm = FeatureExtractor(data)
derived_features = fm.feature_extractor()
data.fillna(0, inplace=True)
data = pandas.concat([data] + derived_features, axis=1)
if result.test:
data_io.save_test_features(data)
else:
data_io.save_train_features(data)
| bsd-3-clause |
agnusfeec/tattCBIR | lib_sistema.py | 1 | 25313 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 14 13:36:05 2016
@author: agnus
"""
#%%
def monta_lista_imagens(path = '.', ext='.png'):
import os
imagens = {}
for dirname, dirnames, filenames in os.walk(path):
# print path to all filenames with extension py.
for filename in filenames:
fname_path = os.path.join(dirname, filename)
fext = os.path.splitext(fname_path)[1]
if fext == ext:
#file_dat = [filename, dirname]
#imagens.append(file_dat)
imagens[filename]=dirname
else:
continue
return imagens
#%%
def grava_db_imagens(arquivo, imagens):
#arquivo = './tatt_c.db'
with open(arquivo, 'wb') as db_image_file:
for nome_img, caminho in imagens.items():
db_image_file.write(nome_img+ '\t' + caminho + '\n')
db_image_file.close()
#%%
def grava_config(arquivo = './example_mem.cfg'):
import ConfigParser
config = ConfigParser.RawConfigParser()
# When adding sections or items, add them in the reverse order of
# how you want them to be displayed in the actual file.
# In addition, please note that using RawConfigParser's and the raw
# mode of ConfigParser's respective set functions, you can assign
# non-string values to keys internally, but will receive an error
# when attempting to write to a file or when you get it in non-raw
# mode. SafeConfigParser does not allow such assignments to take place.
config.add_section('Geral')
config.set('Geral', 'Image Database', 'Tatt-C')
config.set('Geral', 'Database Image Folder', '/media/sf_Projeto/dataset/tatt_dca/')
config.set('Geral', 'Indexa image database', 'True')
config.set('Geral', 'Database filename', './tatt_c.db')
config.set('Geral', 'Image filename extension','.jpg')
config.set('Geral', 'Training File', 'train1')
config.set('Geral', 'Testing File', 'test1')
config.add_section('Folds')
config.set('Folds', 'Folds Folder', '/media/sf_Projeto/dataset/tatt_dca/folds/')
config.set('Folds', 'Quantidade subsets', '3')
config.set('Folds', 'Subset_1', 'gallery{1}.txt')
config.set('Folds', 'Subset_2', 'probes{1}.txt')
config.set('Folds', 'Subset_3', 'bg{1}.txt')
config.set('Folds', 'Ground_truth', 'ground_truth.txt')
config.add_section('SIFT')
config.set('SIFT','SIFT Folder', '/media/sf_Projeto/dataset/tatt_dca/SIFT/')
# Writing our configuration file to 'example.cfg'
with open(arquivo, 'wb') as configfile:
config.write(configfile)
#%%
def folds_construct(subsets, folds_folder):
n_folds =len(subsets[0])
n_subsets = len(subsets)
folds = []
for i in range(n_folds):
sub = []
for j in range(n_subsets):
arquivo = subsets[j][i]
aux = []
with open(folds_folder+arquivo, 'r') as imagefiles:
for nomef in imagefiles:
if nomef[-1] == '\n' : nomef = nomef[:-1]
aux.append(nomef)
imagefiles.close()
sub.append(aux)
folds.append(sub)
return folds
#%%
def le_config():
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('./example_mem.cfg')
# getfloat() raises an exception if the value is not a float
# getint() and getboolean() also do this for their respective types
base = config.get('Geral', 'image database')
indexa = config.getboolean('Geral', 'indexa image database')
print base
if indexa:
print "indexa base"
arquivo = config.get('Geral','database filename')
caminho = config.get('Geral', 'database image folder')
extensao = config.get('Geral', 'image filename extension')
print arquivo, caminho, extensao
imagens = monta_lista_imagens(caminho, extensao)
grava_db_imagens(arquivo, imagens)
folds_folder = config.get('Folds','folds folder')
n_subsets = config.getint('Folds', 'quantidade subsets')
subsets=[]
for i in range(n_subsets):
sub = config.get('Folds', 'subset_'+str(i+1))
ps = sub.find("{")
pe = sub.find("}")
ped = sub[ps+1:pe]
indices = ped.split(',')
aux = []
for ind in indices:
aux.append(sub[:ps]+ind+'.txt') # TODO: make the file extension configurable
subsets.append(aux)
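# e.g. a subset entry 'gallery{1,2,3}.txt' expands to ['gallery1.txt', 'gallery2.txt', 'gallery3.txt']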
#print subsets
#n_folds = config.getint('Folds', 'quantidade folds')
n_folds =len(subsets[0])
folds = []
for i in range(n_folds):
sub = []
for j in range(n_subsets):
arquivo = subsets[j][i]
aux = []
with open(folds_folder+arquivo, 'r') as imagefiles:
for nomef in imagefiles:
if nomef[-1] == '\n' : nomef = nomef[:-1]
aux.append(nomef)
imagefiles.close()
sub.append(aux)
folds.append(sub)
#print folds[0]
gt_filename = config.get('Folds', 'ground_truth')
sift_folder = config.get('SIFT', 'sift folder')
print sift_folder, folds_folder, caminho
return (folds, imagens, gt_filename, sift_folder, folds_folder, caminho, subsets)
#%%
def sift(nomes_imagens, imagens, sift_folder):
import cv2
import os
from math import sqrt
#ds = []
#kp = []
t = len(nomes_imagens)
i=1
for filename in nomes_imagens:
fname = os.path.join(sift_folder, filename[:-3]+'sift_ds')
if os.path.isfile(fname) == False :
print filename
#file_img = os.path.join(diretorio, filename)
diretorio = imagens[filename]
img = cv2.imread(os.path.join(diretorio, filename)) #file_img)
# Resize the image for applying Fisher Vectors
#img = cv2.resize(img, (256,256))
aux = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(aux)
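# Scale factor chosen so that the resized image area is roughly half of 240x480.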
k = sqrt((240.0*480.0*0.5)/(gray.shape[0]*gray.shape[1]))
res = cv2.resize(gray,None,fx=k, fy=k, interpolation = cv2.INTER_CUBIC)
cv2.imwrite("/media/sf_Projeto/dataset/tatt_dca//img_Reduzido/"+filename,res)
sift = cv2.xfeatures2d.SIFT_create()
(kps, descs) = sift.detectAndCompute(res, None)
#ds.append(descs)
#kp.append(kps)
arquivo = os.path.join(sift_folder, filename[:-3]+'sift_ds')
with open(arquivo, 'wb') as sift_file:
for desc in descs:
sift_file.write(','.join(str(x) for x in desc)+'\n')
sift_file.close()
arquivo = os.path.join(sift_folder, filename[:-3]+'sift_kp')
with open(arquivo, 'wb') as sift_file:
for point in kps:
temp = [point.pt[0], point.pt[1], point.size, point.angle,
point.response, point.octave, point.class_id]
sift_file.write(','.join(str(x) for x in temp)+'\n')
sift_file.close()
print (i*100)/t,
i=i+1
#return ds
#%%
def sift_match(ds1, kp1, ds2, kp2):
import cv2
MIN_MATCH_COUNT = 10
bf = cv2.BFMatcher()
matches = bf.knnMatch(ds1,ds2, k=2)
# Apply ratio test
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
qm = len(good)
(nr1,c) = ds1.shape
(nr2,c) = ds2.shape
# if qm>MIN_MATCH_COUNT:
# src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
# dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
#
# M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
# if mask != None:
# matchesMask = mask.ravel().tolist()
# rt = np.sum(np.asarray(matchesMask))
# else:
# rt = 0
# else:
# #print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
# #matchesMask = None
# rt = 0
nr = nr1
if nr2>nr:
nr = nr2
rt = (100.0*qm/nr)
# if qm > 0:
# rt = 1.0/qm
# else:
# rt = 10^8
return rt
#%%
def gera_sift_base(folds, imagens, sift_folder):
# First generate, if necessary, the SIFT descriptors for the training and test
# images. This could be optimized by generating them for the whole database when
# the whole database is used, which may be costly since the database also
# contains images belonging to other use cases.
n_folds = len(folds)
# This could be implemented differently, since the lines below only aggregate the
# image names so that the SIFT descriptors are generated for each of the folds.
for i in range(n_folds):
test = folds[i][1]
train = folds[i][0]
bg = folds[i][2]
for j in range(n_folds):
if j!=i :
train = train + folds[j][0]+folds[j][1]+folds[j][2]
print 'Generating SIFT for the training set'
#train_kp, train_ds = sift(train, imagens, sift_folder)
sift(train, imagens, sift_folder)
print 'Generating SIFT for the test set'
#test_kp, test_ds = sift(test, imagens)
sift(test, imagens, sift_folder)
print 'Generating SIFT for the background (bg) set'
#bg_kp, bg_ds = sift(bg, imagens)
sift(bg, imagens, sift_folder)
#%%
def processa_sift(folds, imagens, sift_folder):
import numpy as np
import os
import cv2
n_folds = len(folds)
# Changed so that the gallery images of fold i are included in the train set, so
# that the images corresponding to each probe are present in the gallery (train)
for i in range(n_folds):
test = folds[i][1]
bg = folds[i][2]
train = folds[i][0]#+bg
for j in range(n_folds):
if j!=i :
train = train + folds[j][0]+folds[j][1]+folds[j][2]
n_test = len(test)
n_train = len(train)
dist = np.zeros((n_train), dtype=np.float)
nn = n_test * n_train
print 'Matching the test set against the training set'
mem = True
if mem==True :
ds=[]
ks=[]
arquivo = './clist_mem_'+str(i+1)+'.txt'
with open(arquivo, 'w') as clist_file:
l = 0
for file_test in test:
fname = os.path.join(sift_folder, file_test[:-3]+'sift_ds')
ds1 = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8) #,skiprows=1)
fname = os.path.join(sift_folder, file_test[:-3]+'sift_kp')
kps = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.float) #,skiprows=1)
kp1=[]
kp2=[]
for kp in kps:
kpoint = cv2.KeyPoint(float(kp[0]), float(kp[1]),
float(kp[2]), float(kp[3]),
float(kp[4]), int(kp[5]), int(kp[6]))
kp1.append(kpoint)
diretorio = imagens[file_test]
img1 = cv2.imread(os.path.join(diretorio, file_test),0)
#print os.path.join(diretorio, file_test)
j = 0
for file_train in train:
diretorio = imagens[file_train]
img2 = cv2.imread(os.path.join(diretorio, file_train),0)
#print os.path.join(diretorio, file_train)
if (mem == True and len(ds)<len(train)):
fname = os.path.join(sift_folder, file_train[:-3]+'sift_ds')
ds.append ( np.asarray((np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8)) ) #,skiprows=1)
ds2 = ds[j]
fname = os.path.join(sift_folder, file_train[:-3]+'sift_kp')
kps = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.float) #,skiprows=1)
aux =[]
for kp in kps:
kpoint = cv2.KeyPoint(float(kp[0]), float(kp[1]),
float(kp[2]), float(kp[3]),
float(kp[4]), int(kp[5]), int(kp[6]))
aux.append(kpoint)
ks.append(aux)
kp2 = ks[j]
elif (mem == True and len(ds)==len(train)):
ds2 = ds[j]
kp2 = ks[j]
elif mem == False:
fname = os.path.join(sift_folder, file_train[:-3]+'sift_ds')
ds2 = ( (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8) )
fname = os.path.join(sift_folder, file_train[:-3]+'sift_kp')
kps = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.float) #,skiprows=1)
kp2 = []
for kp in kps:
kpoint = cv2.KeyPoint(float(kp[0]), float(kp[1]),
float(kp[2]), float(kp[3]),
float(kp[4]), int(kp[5]), int(kp[6]))
kp2.append(kpoint)
#print ds1
#print ds2
rt = sift_match(ds1, np.asarray(kp1), ds2, np.asarray(kp2))
dist[j] = rt
j = j + 1
print i,(((l*n_train)+j)*100)/nn,
indice = np.argsort(dist)[::-1]
k = 1
for id in indice:
clist_file.write(file_test+'|'+ str(k) + '|' + train[id] + '|' + str(dist[id]) +'\n')
k = k + 1
l = l + 1
clist_file.close()
break
#%%
def ground_truth(folds_folder, gt_filename):
"""Reads a ground truth table from text file.
Keyword arguments:
folds_folder -- the path for the ground truth file
gt_filename -- the file name of the ground truth file with extension
Returns:
gt_images -- ground truth table stored in a dictionary
"""
#folds_folder = '/media/sf_Projeto/dataset/tatt-c_update_v1.4/5-fold/tattoo_identification/'
#gt_filename = 'ground_truth.txt'
gt_imagens = {}
with open(folds_folder+gt_filename, 'r') as gt_arq:
for nomef in gt_arq:
imgs = nomef.split('|')
if imgs[1][-1] == '\n' : imgs[1] = imgs[1][:-1]
#print imgs[0], imgs[1]
gt_imagens[imgs[0]] = imgs[1]
gt_arq.close()
return gt_imagens
#%%
def compute_cmc(arquivo, gt_imagens):
"""Reads a classification list from text file and sumarize rank results for
every image reference based in the ground truth dictionary.
Keyword arguments:
arquivo -- the filename of classification list file
gt_images -- ground truth table stored in a dictionary
Returns:
cmc -- acummulated accuracy for each rank stored in a numpy array
"""
import numpy as np
i = 0
acc = np.zeros(400)
#arquivo = './clist_mem_'+str(i+1)+'.txt'
with open(arquivo, 'r') as clist_file:
for nomef in clist_file:
imgs = nomef.split('|')
if imgs[3][-1] == '\n' : imgs[3] = imgs[3][:-1]
if gt_imagens[imgs[0]] == imgs[2] :
r = int(imgs[1])
acc[r] = acc[r]+1
clist_file.close()
#print cmc
ft = sum(acc)
#print cmc/ft
cmc = np.zeros(400)
for i in range(1,400):
cmc[i] = cmc[i-1]+acc[i]/ft
#print cmc1
return cmc
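# Hypothetical usage (paths and fold index are assumptions): evaluate fold 1
# from the classification list written by processa_sift().
# gt = ground_truth(folds_folder, 'ground_truth.txt')
# cmc = compute_cmc('./clist_mem_1.txt', gt)
# plot_cmc(cmc, ni=200)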
#%%
def plot_cmc(cmc, ni=200):
import matplotlib.pyplot as plt
import pylab as P
import numpy as np
fig = P.figure()
fig.suptitle('Acumulative Match Characteristic', fontsize=18, fontweight='bold')
P.ylabel('%', fontsize=16)
P.xlabel('Rank', fontsize=16)
P.xlim(0, ni)
P.ylim(0,101)
P.xticks(np.arange(0, ni, 10.0))
P.yticks(np.arange(0, 101, 5.0))
xticklabels = P.getp(P.gca(), 'xticklabels')
yticklabels = P.getp(P.gca(), 'yticklabels')
P.setp(yticklabels, 'color', 'k', fontsize='x-large')
P.setp(xticklabels, 'color', 'k', fontsize='x-large')
P.grid(True)
fig.set_size_inches(19,7)
#P.plot(cmc*100)
P.plot(cmc*100)
fig.savefig('cmc_bf_knn.png')
P.show()
#%%%
#Author: Jacob Gildenblat, 2014
#http://jacobcv.blogspot.com.br/2014/12/fisher-vector-in-python.html
#License: you may use this for whatever you like
#Adaptation: Agnus A. Horta
def fv_dictionary(descriptors, N):
import numpy as np
import cv2
em = cv2.ml.EM_create()
em.setClustersNumber(N)
#em = cv2.EM(N)
em.trainEM(descriptors)
return np.float32(em.getMeans()), \
np.float32(em.getCovs()), np.float32(em.getWeights())[0]
def fv_generate_gmm(descriptors, N, dt):
import numpy as np
words = np.concatenate(descriptors)
#np.concatenate([folder_descriptors(folder) for folder in glob.glob(input_folder + '*')])
#print("Training GMM of size", N)
means, covs, weights = fv_dictionary(words, N)
#Throw away gaussians with weights that are too small:
th = 1.0 / N
means = np.float32([m for k,m in zip(range(0, len(weights)), means) if weights[k] > th])
covs = np.float32([m for k,m in zip(range(0, len(weights)), covs) if weights[k] > th])
weights = np.float32([m for k,m in zip(range(0, len(weights)), weights) if weights[k] > th])
#print 'Means: ',means
#print 'Covs: ',covs
#print 'Weights: ',weights
np.save("./dat/means" + dt + ".gmm", means)
np.save("./dat/covs" + dt + ".gmm", covs)
np.save("./dat/weights" + dt + ".gmm", weights)
return means, covs, weights
def fv_load_gmm(dt, folder = "./dat"):
import numpy as np
files = ["means" + dt + ".gmm" +".npy", "covs" + dt + ".gmm.npy", "weights" + dt + ".gmm.npy"]
try:
return map(lambda file: np.load(file), map(lambda s : folder + "/" + s , files))
except IOError:
return (None, None, None)
def fv_likelihood_moment(x, ytk, moment):
import numpy as np
x_moment = np.power(np.float32(x), moment) if moment > 0 else np.float32([1])
return x_moment * ytk
def fv_likelihood_statistics(samples, means, covs, weights):
from scipy.stats import multivariate_normal
import numpy as np
gaussians, s0, s1,s2 = {}, {}, {}, {}
samples = zip(range(0, len(samples)), samples)
#print samples
g = [multivariate_normal(mean=means[k], cov=covs[k]) for k in range(0, len(weights)) ]
for index, x in samples:
gaussians[index] = np.array([g_k.pdf(x) for g_k in g])
for k in range(0, len(weights)):
s0[k], s1[k], s2[k] = 0, 0, 0
for index, x in samples:
probabilities = np.multiply(gaussians[index], weights)
probabilities = probabilities / np.sum(probabilities)
s0[k] = s0[k] + fv_likelihood_moment(x, probabilities[k], 0)
s1[k] = s1[k] + fv_likelihood_moment(x, probabilities[k], 1)
s2[k] = s2[k] + fv_likelihood_moment(x, probabilities[k], 2)
return s0, s1, s2
def fv_fisher_vector_weights(s0, s1, s2, means, covs, w, T):
import numpy as np
return np.float32([((s0[k] - T * w[k]) / np.sqrt(w[k]) ) for k in range(0, len(w))])
def fv_fisher_vector_means(s0, s1, s2, means, sigma, w, T):
import numpy as np
return np.float32([(s1[k] - means[k] * s0[k]) / (np.sqrt(w[k] * sigma[k])) for k in range(0, len(w))])
def fv_fisher_vector_sigma(s0, s1, s2, means, sigma, w, T):
import numpy as np
return np.float32([(s2[k] - 2 * means[k]*s1[k] + (means[k]*means[k] - sigma[k]) * s0[k]) / (np.sqrt(2*w[k])*sigma[k]) for k in range(0, len(w))])
def fv_normalize(fisher_vector):
import numpy as np
v = np.sqrt(abs(fisher_vector)) * np.sign(fisher_vector)
return v / np.sqrt(np.dot(v, v))
def fv_fisher_vector(samples, means, covs, w):
import numpy as np
#print 'fisher_vector(samples, means, covs, w)'
s0, s1, s2 = fv_likelihood_statistics(samples, means, covs, w)
T = samples.shape[0]
covs = np.float32([np.diagonal(covs[k]) for k in range(0, covs.shape[0])])
a = fv_fisher_vector_weights(s0, s1, s2, means, covs, w, T)
b = fv_fisher_vector_means(s0, s1, s2, means, covs, w, T)
c = fv_fisher_vector_sigma(s0, s1, s2, means, covs, w, T)
fv = np.concatenate([np.concatenate(a), np.concatenate(b), np.concatenate(c)])
fv = fv_normalize(fv)
#print 'fv = ', fv
return fv
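# Hypothetical usage sketch for the Fisher Vector helpers above (the GMM size,
# the dt tag and the descriptor source are assumptions; le_descritores is
# defined further below):
# ds, id_ds = le_descritores(sift_folder, train, tipo=1)      # list of per-image SIFT arrays
# means, covs, weights = fv_generate_gmm(ds, 64, '_64')       # or fv_load_gmm('_64')
# fvs = [fv_fisher_vector(np.float32(d), means, covs, weights) for d in ds]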
def le_descritores(sift_folder, subset, tipo=1):
import os
import numpy as np
#n_folds = len(folds)
#Changed so that the gallery images of fold i are included in the train set, so
# that the images corresponding to each probe are present in the gallery (train)
# for i in range(n_folds):
# train = folds[i][0]
# for j in range(n_folds):
# if j!=i :
# train = train + folds[j][0]+folds[j][1]+folds[j][2]
#
# n_train = len(train)
ch = 0
ds = []
id_ds = []
for image in subset:
fname = os.path.join(sift_folder, image[:-3]+'sift_ds')
ds1 = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8) #,skiprows=1)
if tipo == 1:
if ch == 0:
ch = 1
ds = []
ds.append(ds1)
id_ds.append(ds1.shape[0])
else:
ds.append(ds1)
id_ds.append(ds1.shape[0])
else:
if ch == 0:
ch = 1
ds = np.empty_like(ds1)
ds[:] = ds1
id_ds.append(ds1.shape[0])
else:
print ds.shape, ds1.shape
ds = np.concatenate((ds, ds1), axis=0)
id_ds.append(ds1.shape[0])
return ds, id_ds
#%%
def bov_histogramas_grava(arquivo, hists, dt):
resultFile = open(arquivo, 'w')
i = len(hists)
for h in hists:
line = (''.join(str(e) + ", " for e in h.tolist()))[:-2]
resultFile.write(line)
if i > 0:
resultFile.write("\n")
i = i - 1
resultFile.close()
#%%
def bov_codebook_gera(l_sift, nc, tipo):
if tipo == 1:
# http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans.fit
from sklearn.cluster import KMeans
est = KMeans(n_clusters=nc, init='k-means++', n_init=10, max_iter=100,
tol=0.0001, precompute_distances='auto', verbose=0,
random_state=None, copy_x=True, n_jobs=4)
est.fit(l_sift)
labels = est.labels_
centers = est.cluster_centers_
elif tipo == 2:
from sklearn.cluster import MiniBatchKMeans
est = MiniBatchKMeans(n_clusters=nc, init='k-means++', max_iter=100,
batch_size=3*nc, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01)
est.fit(l_sift)
labels = est.labels_
centers = est.cluster_centers_
else:
import random
from scipy.cluster.vq import vq
import numpy as np
list_of_random_items = random.sample(np.arange(l_sift.shape[0]), nc)
l_centroids = []
for i in list_of_random_items:
l_centroids.append(l_sift[i])
centers = np.asarray(l_centroids)
labels, _ = vq(l_sift, centers)
return (centers, labels)
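# Hypothetical bag-of-visual-words sketch using the helpers in this module
# (codebook size and output file name are assumptions):
# ds, id_ds = le_descritores(sift_folder, train, tipo=2)      # stacked descriptors
# centers, labels = bov_codebook_gera(ds, 256, 2)             # MiniBatchKMeans codebook
# hists = bov_histogramas_gera(labels, id_ds, 256, train, vis=False)
# bov_histogramas_grava('./bov_hists.txt', hists, dt='')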
#%%
def bov_histogramas_gera(labels, id_ds, k, nomes_imagens, vis=False):
from matplotlib import pyplot as plt
import numpy as np
#fv = np.vectorize(f)
hists = []
i = 0
for j in range(len(nomes_imagens)):
#ld = X[indices[j]].tolist()
n = id_ds[j]
sl = labels[i:i+n]
hist, bins = np.histogram(sl, bins=k, range=(0, k), normed=False,
weights=None, density=True)
if vis == True:
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.title("Histogram "+nomes_imagens[j])
plt.xlabel("Visual Word")
plt.ylabel("Frequency")
plt.bar(center, hist, align='center', width=width)
plt.show()
#print j
hists.append(hist)
#print hist
i = i + n
#j = j +1
return hists
def bov_descritores_codifica(X, centers):
from scipy.cluster.vq import vq
labels,_ = vq(X,centers)
return labels
| gpl-3.0 |
SaschaMester/delicium | chrome/test/data/nacl/gdb_rsp.py | 42 | 2542 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is based on gdb_rsp.py file from NaCl repository.
import re
import socket
import time
def RspChecksum(data):
checksum = 0
for char in data:
checksum = (checksum + ord(char)) % 0x100
return checksum
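# Example: for the packet body 'g', ord('g') == 0x67, so the framed RSP message
# on the wire is '$g#67'.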
class EofOnReplyException(Exception):
pass
class GdbRspConnection(object):
def __init__(self, addr):
self._socket = self._Connect(addr)
def _Connect(self, addr):
# We have to poll because we do not know when sel_ldr has
# successfully done bind() on the TCP port. This is inherently
# unreliable.
# TODO(mseaborn): Add a more reliable connection mechanism to
# sel_ldr's debug stub.
timeout_in_seconds = 10
poll_time_in_seconds = 0.1
for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
# On Mac OS X, we have to create a new socket FD for each retry.
sock = socket.socket()
try:
sock.connect(addr)
except socket.error:
# Retry after a delay.
time.sleep(poll_time_in_seconds)
else:
return sock
raise Exception('Could not connect to sel_ldr\'s debug stub in %i seconds'
% timeout_in_seconds)
def _GetReply(self):
reply = ''
while True:
data = self._socket.recv(1024)
if len(data) == 0:
if reply == '+':
raise EofOnReplyException()
raise AssertionError('EOF on socket reached with '
'incomplete reply message: %r' % reply)
reply += data
if '#' in data:
break
match = re.match('\+\$([^#]*)#([0-9a-fA-F]{2})$', reply)
if match is None:
raise AssertionError('Unexpected reply message: %r' % reply)
reply_body = match.group(1)
checksum = match.group(2)
expected_checksum = '%02x' % RspChecksum(reply_body)
if checksum != expected_checksum:
raise AssertionError('Bad RSP checksum: %r != %r' %
(checksum, expected_checksum))
# Send acknowledgement.
self._socket.send('+')
return reply_body
# Send an rsp message, but don't wait for or expect a reply.
def RspSendOnly(self, data):
msg = '$%s#%02x' % (data, RspChecksum(data))
return self._socket.send(msg)
def RspRequest(self, data):
self.RspSendOnly(data)
return self._GetReply()
def RspInterrupt(self):
self._socket.send('\x03')
return self._GetReply()
| bsd-3-clause |
kernc/scikit-learn | examples/decomposition/plot_image_denoising.py | 70 | 6249 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, first using online :ref:`DictionaryLearning` and then
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients are a bit less biased than when keeping only one
(the edges look less prominent). They are in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
###############################################################################
try:
from scipy import misc
face = misc.face(gray=True)
except AttributeError:
# Old versions of scipy have face in the top level package
face = sp.face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255.
# downsample for higher speed
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape
# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
patches, (height, width // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], face,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
mitschabaude/nanopores | scripts/wei/test_geo.py | 1 | 1625 | # (c) 2017 Gregor Mitscha-Baude
import numpy as np
from nanopores.tools.polygons import Ball, Polygon, MultiPolygon, MultiPolygonPore
from nanopores.geometries.cylpore import MultiPore
from nanopores import user_params
params = user_params(
R = 100.,
R0 = 60.,
H0 = 70.,
H = 150.,
x0 = [0, 0, 46],
rMolecule = 2.1,
dim = 3,
no_membrane = True,
r0 = 13, # pore radius
angle = 40, # aperture angle in degrees
lcCenter = 0.3,
lcMolecule = 0.1,
h = 10.,
subs = "solid",
reconstruct = False,
poreregion = True,
)
# SiN membrane thickness (in vertical direction)
lsin = 50.
# Au membrane thickness (in vertical direction)
lau = 40.
# Au thickness in radial direction
rlau = 10.
# SAM layer thickness (in vertical direction)
lsam = 3
l0 = lau + lsin + lsam
angle2 = params.angle/2. * np.pi/180.
tan = np.tan(angle2)
sin = np.sin(angle2)
cos = np.cos(angle2)
l = l0/2.
r0 = params.r0
r1 = r0 + l0*tan
rsam = r0 + lsam/cos
rsin = r0 + lsam/cos + rlau
R = params.R
sam = [[r0, -l], [r1, l], [R, l], [R, l - lsam],
[rsam - tan*(lsam - l0), l - lsam], [rsam, -l]]
au = [sam[5], sam[4], sam[3], [R, -l + lsin], [rsin + tan*lsin, -l + lsin],
[rsin, -l]]
sin = [au[5], au[4], au[3], [R, -l]]
p = MultiPore(**params)
p.add_polygons(sam=sam, au=au, sin=sin)
receptor = Ball([30.,0.,30.], 7., lc=0.1)
p.add_balls(receptor=receptor)
geo = p.build(params.h, params.subs, params.reconstruct)
P = p.protein
P.plot(".k")
from matplotlib import pyplot as plt
plt.xlim(0, R + 5)
print geo
print geo.params
geo.plot_subdomains()
geo.plot_boundaries(interactive=True) | mit |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/tseries/offsets.py | 7 | 95013 | from datetime import date, datetime, timedelta
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.types.generic import ABCSeries, ABCDatetimeIndex, ABCPeriod
from pandas.tseries.tools import to_datetime, normalize_date
from pandas.core.common import AbstractMethodError
# import after tools, dateutil check
from dateutil.relativedelta import relativedelta, weekday
from dateutil.easter import easter
import pandas.tslib as tslib
from pandas.tslib import Timestamp, OutOfBoundsDatetime, Timedelta
import functools
import operator
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'CBMonthEnd', 'CBMonthBegin',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
'SemiMonthEnd', 'SemiMonthBegin',
'BusinessHour', 'CustomBusinessHour',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',
'Week', 'WeekOfMonth', 'Easter',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset']
# convert to/from datetime/timestamp to allow invalid Timestamp ranges to
# pass thru
def as_timestamp(obj):
if isinstance(obj, Timestamp):
return obj
try:
return Timestamp(obj)
except (OutOfBoundsDatetime):
pass
return obj
def as_datetime(obj):
f = getattr(obj, 'to_pydatetime', None)
if f is not None:
obj = f()
return obj
def apply_wraps(func):
@functools.wraps(func)
def wrapper(self, other):
if other is tslib.NaT:
return tslib.NaT
elif isinstance(other, (timedelta, Tick, DateOffset)):
# timedelta path
return func(self, other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = as_timestamp(other)
tz = getattr(other, 'tzinfo', None)
nano = getattr(other, 'nanosecond', 0)
try:
if self._adjust_dst and isinstance(other, Timestamp):
other = other.tz_localize(None)
result = func(self, other)
if self._adjust_dst:
result = tslib._localize_pydatetime(result, tz)
result = Timestamp(result)
if self.normalize:
result = result.normalize()
# nanosecond may be deleted depending on offset process
if not self.normalize and nano != 0:
if not isinstance(self, Nano) and result.nanosecond != nano:
if result.tz is not None:
# convert to UTC
value = tslib.tz_convert_single(
result.value, 'UTC', result.tz)
else:
value = result.value
result = Timestamp(value + nano)
if tz is not None and result.tzinfo is None:
result = tslib._localize_pydatetime(result, tz)
except OutOfBoundsDatetime:
result = func(self, as_datetime(other))
if self.normalize:
# normalize_date returns normal datetime
result = normalize_date(result)
if tz is not None and result.tzinfo is None:
result = tslib._localize_pydatetime(result, tz)
return result
return wrapper
def apply_index_wraps(func):
@functools.wraps(func)
def wrapper(self, other):
result = func(self, other)
if self.normalize:
result = result.to_period('D').to_timestamp()
return result
return wrapper
def _is_normalized(dt):
if (dt.hour != 0 or dt.minute != 0 or dt.second != 0 or
dt.microsecond != 0 or getattr(dt, 'nanosecond', 0) != 0):
return False
return True
# ---------------------------------------------------------------------
# DateOffset
class ApplyTypeError(TypeError):
# sentinel class for catching the apply error to return NotImplemented
pass
class CacheableOffset(object):
_cacheable = True
class DateOffset(object):
"""
Standard kind of date increment used for a date range.
Works exactly like relativedelta in terms of the keyword args you
    pass in. Use of the keyword n is discouraged; you would be better
off specifying n in the keywords you use, but regardless it is
there for you. n is needed for DateOffset subclasses.
    DateOffsets work as follows. Each offset specifies a set of dates
that conform to the DateOffset. For example, Bday defines this
set to be the set of dates that are weekdays (M-F). To test if a
date is in the set of a DateOffset dateOffset we can use the
onOffset method: dateOffset.onOffset(date).
If a date is not on a valid date, the rollback and rollforward
methods can be used to roll the date to the nearest valid date
before/after the date.
DateOffsets can be created to move dates forward a given number of
valid dates. For example, Bday(2) can be added to a date to move
it two business days forward. If the date does not start on a
    valid date, first it is moved to a valid date. Thus the pseudo code
is:
def __add__(date):
date = rollback(date) # does nothing if date is valid
return date + <n number of periods>
    When a date offset is created for a negative number of periods,
the date is first rolled forward. The pseudo code is:
def __add__(date):
      date = rollforward(date) # does nothing if date is valid
return date + <n number of periods>
Zero presents a problem. Should it roll forward or back? We
arbitrarily have it rollforward:
date + BDay(0) == BDay.rollforward(date)
Since 0 is a bit weird, we suggest avoiding its use.
"""
_cacheable = False
_normalize_cache = True
_kwds_use_relativedelta = (
'years', 'months', 'weeks', 'days',
'year', 'month', 'week', 'day', 'weekday',
'hour', 'minute', 'second', 'microsecond'
)
_use_relativedelta = False
_adjust_dst = False
# default for prior pickles
normalize = False
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self._offset, self._use_relativedelta = self._determine_offset()
def _determine_offset(self):
        # timedelta is used for sub-daily plural offsets and all singular
        # offsets; relativedelta is used for plural offsets of daily length
        # or more; nanosecond(s) are handled by apply_wraps
kwds_no_nanos = dict(
(k, v) for k, v in self.kwds.items()
if k not in ('nanosecond', 'nanoseconds')
)
use_relativedelta = False
if len(kwds_no_nanos) > 0:
if any(k in self._kwds_use_relativedelta for k in kwds_no_nanos):
use_relativedelta = True
offset = relativedelta(**kwds_no_nanos)
else:
# sub-daily offset - use timedelta (tz-aware)
offset = timedelta(**kwds_no_nanos)
else:
offset = timedelta(1)
return offset, use_relativedelta
@apply_wraps
def apply(self, other):
if self._use_relativedelta:
other = as_datetime(other)
if len(self.kwds) > 0:
tzinfo = getattr(other, 'tzinfo', None)
if tzinfo is not None and self._use_relativedelta:
# perform calculation in UTC
other = other.replace(tzinfo=None)
if self.n > 0:
for i in range(self.n):
other = other + self._offset
else:
for i in range(-self.n):
other = other - self._offset
if tzinfo is not None and self._use_relativedelta:
# bring tz back from UTC calculation
other = tslib._localize_pydatetime(other, tzinfo)
return as_timestamp(other)
else:
return other + timedelta(self.n)
@apply_index_wraps
def apply_index(self, i):
"""
Vectorized apply of DateOffset to DatetimeIndex,
        raises NotImplementedError for offsets without a
vectorized implementation
.. versionadded:: 0.17.0
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex
"""
if not type(self) is DateOffset:
raise NotImplementedError("DateOffset subclass %s "
"does not have a vectorized "
"implementation"
% (self.__class__.__name__,))
relativedelta_fast = set(['years', 'months', 'weeks',
'days', 'hours', 'minutes',
'seconds', 'microseconds'])
# relativedelta/_offset path only valid for base DateOffset
if (self._use_relativedelta and
set(self.kwds).issubset(relativedelta_fast)):
months = ((self.kwds.get('years', 0) * 12 +
self.kwds.get('months', 0)) * self.n)
if months:
shifted = tslib.shift_months(i.asi8, months)
i = i._shallow_copy(shifted)
weeks = (self.kwds.get('weeks', 0)) * self.n
if weeks:
i = (i.to_period('W') + weeks).to_timestamp() + \
i.to_perioddelta('W')
timedelta_kwds = dict((k, v) for k, v in self.kwds.items()
if k in ['days', 'hours', 'minutes',
'seconds', 'microseconds'])
if timedelta_kwds:
delta = Timedelta(**timedelta_kwds)
i = i + (self.n * delta)
return i
elif not self._use_relativedelta and hasattr(self, '_offset'):
# timedelta
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
raise NotImplementedError("DateOffset with relativedelta "
"keyword(s) %s not able to be "
"applied vectorized" %
(set(self.kwds) - relativedelta_fast),)
def isAnchored(self):
return (self.n == 1)
def copy(self):
return self.__class__(self.n, normalize=self.normalize, **self.kwds)
def _should_cache(self):
return self.isAnchored() and self._cacheable
def _params(self):
all_paras = dict(list(vars(self).items()) + list(self.kwds.items()))
if 'holidays' in all_paras and not all_paras['holidays']:
all_paras.pop('holidays')
exclude = ['kwds', 'name', 'normalize', 'calendar']
attrs = [(k, v) for k, v in all_paras.items()
if (k not in exclude) and (k[0] != '_')]
attrs = sorted(set(attrs))
params = tuple([str(self.__class__)] + attrs)
return params
def __repr__(self):
className = getattr(self, '_outputName', type(self).__name__)
exclude = set(['n', 'inc', 'normalize'])
attrs = []
for attr in sorted(self.__dict__):
if ((attr == 'kwds' and len(self.kwds) == 0) or
attr.startswith('_')):
continue
elif attr == 'kwds':
kwds_new = {}
for key in self.kwds:
if not hasattr(self, key):
kwds_new[key] = self.kwds[key]
if len(kwds_new) > 0:
attrs.append('='.join((attr, repr(kwds_new))))
else:
if attr not in exclude:
attrs.append('='.join((attr, repr(getattr(self, attr)))))
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
n_str = ""
if self.n != 1:
n_str = "%s * " % self.n
out = '<%s' % n_str + className + plural
if attrs:
out += ': ' + ', '.join(attrs)
out += '>'
return out
@property
def name(self):
return self.rule_code
def __eq__(self, other):
if other is None:
return False
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if not isinstance(other, DateOffset):
return False
return self._params() == other._params()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._params())
def __call__(self, other):
return self.apply(other)
def __add__(self, other):
if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
return other + self
elif isinstance(other, ABCPeriod):
return other + self
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, datetime):
raise TypeError('Cannot subtract datetime from offset.')
elif type(other) == type(self):
return self.__class__(self.n - other.n, normalize=self.normalize,
**self.kwds)
else: # pragma: no cover
return NotImplemented
def __rsub__(self, other):
if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
return other - self
return self.__class__(-self.n, normalize=self.normalize,
**self.kwds) + other
def __mul__(self, someInt):
return self.__class__(n=someInt * self.n, normalize=self.normalize,
**self.kwds)
def __rmul__(self, someInt):
return self.__mul__(someInt)
def __neg__(self):
return self.__class__(-self.n, normalize=self.normalize, **self.kwds)
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
# XXX, see #1395
if type(self) == DateOffset or isinstance(self, Tick):
return True
# Default (slow) method for determining if some date is a member of the
# date range generated by this offset. Subclasses may have this
# re-implemented in a nicer way.
a = dt
b = ((dt + self) - self)
return a == b
# helpers for vectorized offsets
def _beg_apply_index(self, i, freq):
"""Offsets index to beginning of Period frequency"""
off = i.to_perioddelta('D')
from pandas.tseries.frequencies import get_freq_code
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
if self.n <= 0:
# when subtracting, dates on start roll to prior
roll = np.where(base_period.to_timestamp() == i - off,
self.n, self.n + 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp()
return base + off
def _end_apply_index(self, i, freq):
"""Offsets index to end of Period frequency"""
off = i.to_perioddelta('D')
from pandas.tseries.frequencies import get_freq_code
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
if self.n > 0:
# when adding, dates on end roll to next
roll = np.where(base_period.to_timestamp(how='end') == i - off,
self.n, self.n - 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp(how='end')
return base + off
# way to get around weirdness with rule_code
@property
def _prefix(self):
raise NotImplementedError('Prefix not defined')
@property
def rule_code(self):
return self._prefix
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
return fstr
@property
def nanos(self):
raise ValueError("{0} is a non-fixed frequency".format(self))
class SingleConstructorOffset(DateOffset):
@classmethod
def _from_name(cls, suffix=None):
# default _from_name calls cls with no args
if suffix:
raise ValueError("Bad freq suffix %s" % suffix)
return cls()
class BusinessMixin(object):
""" mixin to business types to provide related functions """
# TODO: Combine this with DateOffset by defining a whitelisted set of
# attributes on each object rather than the existing behavior of iterating
# over internal ``__dict__``
def __repr__(self):
className = getattr(self, '_outputName', self.__class__.__name__)
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
n_str = ""
if self.n != 1:
n_str = "%s * " % self.n
out = '<%s' % n_str + className + plural + self._repr_attrs() + '>'
return out
def _repr_attrs(self):
if self.offset:
attrs = ['offset=%s' % repr(self.offset)]
else:
attrs = None
out = ''
if attrs:
out += ': ' + ', '.join(attrs)
return out
def __getstate__(self):
"""Return a pickleable state"""
state = self.__dict__.copy()
        # we don't want to actually pickle the calendar object
        # as it's an np.busdaycalendar; we recreate it on deserialization
if 'calendar' in state:
del state['calendar']
try:
state['kwds'].pop('calendar')
except KeyError:
pass
return state
def __setstate__(self, state):
"""Reconstruct an instance from a pickled state"""
self.__dict__ = state
if 'weekmask' in state and 'holidays' in state:
calendar, holidays = self.get_calendar(weekmask=self.weekmask,
holidays=self.holidays,
calendar=None)
self.kwds['calendar'] = self.calendar = calendar
self.kwds['holidays'] = self.holidays = holidays
self.kwds['weekmask'] = state['weekmask']
class BusinessDay(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business days
"""
_prefix = 'B'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
if self.offset:
fstr += self._offset_str()
return fstr
def _offset_str(self):
def get_str(td):
off_str = ''
if td.days > 0:
off_str += str(td.days) + 'D'
if td.seconds > 0:
s = td.seconds
hrs = int(s / 3600)
if hrs != 0:
off_str += str(hrs) + 'H'
s -= hrs * 3600
mts = int(s / 60)
if mts != 0:
off_str += str(mts) + 'Min'
s -= mts * 60
if s != 0:
off_str += str(s) + 's'
if td.microseconds > 0:
off_str += str(td.microseconds) + 'us'
return off_str
if isinstance(self.offset, timedelta):
zero = timedelta(0, 0, 0)
if self.offset >= zero:
off_str = '+' + get_str(self.offset)
else:
off_str = '-' + get_str(-self.offset)
return off_str
else:
return '+' + repr(self.offset)
def isAnchored(self):
return (self.n == 1)
@apply_wraps
def apply(self, other):
if isinstance(other, datetime):
n = self.n
if n == 0 and other.weekday() > 4:
n = 1
result = other
# avoid slowness below
if abs(n) > 5:
k = n // 5
result = result + timedelta(7 * k)
if n < 0 and result.weekday() > 4:
n += 1
n -= 5 * k
if n == 0 and result.weekday() > 4:
n -= 1
while n != 0:
k = n // abs(n)
result = result + timedelta(k)
if result.weekday() < 5:
n -= k
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine business day with '
'datetime or timedelta.')
@apply_index_wraps
def apply_index(self, i):
time = i.to_perioddelta('D')
# to_period rolls forward to next BDay; track and
# reduce n where it does when rolling forward
shifted = (i.to_perioddelta('B') - time).asi8 != 0
if self.n > 0:
roll = np.where(shifted, self.n - 1, self.n)
else:
roll = self.n
return (i.to_period('B') + roll).to_timestamp() + time
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() < 5
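# Hedged usage sketch (not part of pandas itself): illustrates the rollback/
# rollforward semantics described in the DateOffset docstring above, using
# the BusinessDay offset just defined. The function is never called here.
def _business_day_roll_sketch():
    sat = datetime(2014, 1, 4)  # a Saturday
    assert sat + BusinessDay(1) == Timestamp('2014-01-06')             # next Monday
    assert BusinessDay(1).rollforward(sat) == Timestamp('2014-01-06')
    assert BusinessDay(1).rollback(sat) == Timestamp('2014-01-03')     # prior Friday
    # adding a zero-period offset behaves like rollforward, as the docstring notes
    assert sat + BusinessDay(0) == BusinessDay(1).rollforward(sat)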
class BusinessHourMixin(BusinessMixin):
def __init__(self, **kwds):
# must be validated here to equality check
kwds['start'] = self._validate_time(kwds.get('start', '09:00'))
kwds['end'] = self._validate_time(kwds.get('end', '17:00'))
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.start = kwds.get('start', '09:00')
self.end = kwds.get('end', '17:00')
def _validate_time(self, t_input):
from datetime import time as dt_time
import time
if isinstance(t_input, compat.string_types):
try:
t = time.strptime(t_input, '%H:%M')
return dt_time(hour=t.tm_hour, minute=t.tm_min)
except ValueError:
raise ValueError("time data must match '%H:%M' format")
elif isinstance(t_input, dt_time):
if t_input.second != 0 or t_input.microsecond != 0:
raise ValueError(
"time data must be specified only with hour and minute")
return t_input
else:
raise ValueError("time data must be string or datetime.time")
def _get_daytime_flag(self):
if self.start == self.end:
raise ValueError('start and end must not be the same')
elif self.start < self.end:
return True
else:
return False
def _next_opening_time(self, other):
"""
        If n is positive, return tomorrow's business day opening time.
        Otherwise, return the previous business day's opening time.
        Opening time always falls on a BusinessDay; closing time may not,
        when the business hours extend over midnight.
"""
if not self.next_bday.onOffset(other):
other = other + self.next_bday
else:
if self.n >= 0 and self.start < other.time():
other = other + self.next_bday
elif self.n < 0 and other.time() < self.start:
other = other + self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _prev_opening_time(self, other):
"""
If n is positive, return yesterday's business day opening time.
Otherwise yesterday business day's opening time.
"""
if not self.next_bday.onOffset(other):
other = other - self.next_bday
else:
if self.n >= 0 and other.time() < self.start:
other = other - self.next_bday
elif self.n < 0 and other.time() > self.start:
other = other - self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _get_business_hours_by_sec(self):
"""
Return business hours in a day by seconds.
"""
if self._get_daytime_flag():
            # create dummy datetime to calculate business hours in a day
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
return tslib.tot_seconds(until - dtstart)
else:
self.daytime = False
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
return tslib.tot_seconds(until - dtstart)
@apply_wraps
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
if not self.onOffset(dt):
businesshours = self._get_business_hours_by_sec()
if self.n >= 0:
dt = self._prev_opening_time(
dt) + timedelta(seconds=businesshours)
else:
dt = self._next_opening_time(
dt) + timedelta(seconds=businesshours)
return dt
@apply_wraps
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
if not self.onOffset(dt):
if self.n >= 0:
return self._next_opening_time(dt)
else:
return self._prev_opening_time(dt)
return dt
@apply_wraps
def apply(self, other):
        # calculate here because offset is not immutable
daytime = self._get_daytime_flag()
businesshours = self._get_business_hours_by_sec()
bhdelta = timedelta(seconds=businesshours)
if isinstance(other, datetime):
# used for detecting edge condition
nanosecond = getattr(other, 'nanosecond', 0)
# reset timezone and nanosecond
            # other may be a Timestamp, so don't use replace
other = datetime(other.year, other.month, other.day,
other.hour, other.minute,
other.second, other.microsecond)
n = self.n
if n >= 0:
if (other.time() == self.end or
not self._onOffset(other, businesshours)):
other = self._next_opening_time(other)
else:
if other.time() == self.start:
# adjustment to move to previous business day
other = other - timedelta(seconds=1)
if not self._onOffset(other, businesshours):
other = self._next_opening_time(other)
other = other + bhdelta
bd, r = divmod(abs(n * 60), businesshours // 60)
if n < 0:
bd, r = -bd, -r
if bd != 0:
skip_bd = BusinessDay(n=bd)
                # midnight business hour may not be on a BusinessDay
if not self.next_bday.onOffset(other):
remain = other - self._prev_opening_time(other)
other = self._next_opening_time(other + skip_bd) + remain
else:
other = other + skip_bd
hours, minutes = divmod(r, 60)
result = other + timedelta(hours=hours, minutes=minutes)
# because of previous adjustment, time will be larger than start
if ((daytime and (result.time() < self.start or
self.end < result.time())) or
not daytime and (self.end < result.time() < self.start)):
if n >= 0:
bday_edge = self._prev_opening_time(other)
bday_edge = bday_edge + bhdelta
                    # calculate remainder
bday_remain = result - bday_edge
result = self._next_opening_time(other)
result += bday_remain
else:
bday_edge = self._next_opening_time(other)
bday_remain = result - bday_edge
result = self._next_opening_time(result) + bhdelta
result += bday_remain
# edge handling
if n >= 0:
if result.time() == self.end:
result = self._next_opening_time(result)
else:
if result.time() == self.start and nanosecond == 0:
# adjustment to move to previous business day
result = self._next_opening_time(
result - timedelta(seconds=1)) + bhdelta
return result
else:
raise ApplyTypeError(
'Only know how to combine business hour with ')
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if dt.tzinfo is not None:
dt = datetime(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, dt.microsecond)
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
businesshours = self._get_business_hours_by_sec()
return self._onOffset(dt, businesshours)
def _onOffset(self, dt, businesshours):
"""
        Slight speedups using calculated values
"""
# if self.normalize and not _is_normalized(dt):
# return False
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
if self.n >= 0:
op = self._prev_opening_time(dt)
else:
op = self._next_opening_time(dt)
span = tslib.tot_seconds(dt - op)
if span <= businesshours:
return True
else:
return False
def _repr_attrs(self):
out = super(BusinessHourMixin, self)._repr_attrs()
start = self.start.strftime('%H:%M')
end = self.end.strftime('%H:%M')
attrs = ['{prefix}={start}-{end}'.format(prefix=self._prefix,
start=start, end=end)]
out += ': ' + ', '.join(attrs)
return out
class BusinessHour(BusinessHourMixin, SingleConstructorOffset):
"""
    DateOffset subclass representing possibly n business hours
    .. versionadded:: 0.16.1
"""
_prefix = 'BH'
_anchor = 0
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
super(BusinessHour, self).__init__(**kwds)
# used for moving to next businessday
if self.n >= 0:
nb_offset = 1
else:
nb_offset = -1
self.next_bday = BusinessDay(n=nb_offset)
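# Hedged usage sketch (not part of pandas itself): default business hours run
# 09:00-17:00, so adding one BusinessHour at 16:30 on Friday 2014-08-01 spills
# over into Monday 09:30. The function is never called here.
def _business_hour_sketch():
    assert Timestamp('2014-08-01 10:00') + BusinessHour() == \
        Timestamp('2014-08-01 11:00')
    assert Timestamp('2014-08-01 16:30') + BusinessHour() == \
        Timestamp('2014-08-04 09:30')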
class CustomBusinessDay(BusinessDay):
"""
**EXPERIMENTAL** DateOffset subclass representing possibly n business days
excluding holidays
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'C'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
calendar, holidays = self.get_calendar(weekmask=weekmask,
holidays=holidays,
calendar=calendar)
# CustomBusinessDay instances are identified by the
# following two attributes. See DateOffset._params()
# holidays, weekmask
self.kwds['weekmask'] = self.weekmask = weekmask
self.kwds['holidays'] = self.holidays = holidays
self.kwds['calendar'] = self.calendar = calendar
def get_calendar(self, weekmask, holidays, calendar):
"""Generate busdaycalendar"""
if isinstance(calendar, np.busdaycalendar):
if not holidays:
holidays = tuple(calendar.holidays)
elif not isinstance(holidays, tuple):
holidays = tuple(holidays)
else:
# trust that calendar.holidays and holidays are
# consistent
pass
return calendar, holidays
if holidays is None:
holidays = []
try:
holidays = holidays + calendar.holidays().tolist()
except AttributeError:
pass
holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
holidays]
holidays = tuple(sorted(holidays))
kwargs = {'weekmask': weekmask}
if holidays:
kwargs['holidays'] = holidays
busdaycalendar = np.busdaycalendar(**kwargs)
return busdaycalendar, holidays
@apply_wraps
def apply(self, other):
if self.n <= 0:
roll = 'forward'
else:
roll = 'backward'
if isinstance(other, datetime):
date_in = other
np_dt = np.datetime64(date_in.date())
np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,
busdaycal=self.calendar)
dt_date = np_incr_dt.astype(datetime)
result = datetime.combine(dt_date, date_in.time())
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine trading day with '
'datetime, datetime64 or timedelta.')
def apply_index(self, i):
raise NotImplementedError
@staticmethod
def _to_dt64(dt, dtype='datetime64'):
# Currently
# > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
# numpy.datetime64('2013-05-01T02:00:00.000000+0200')
# Thus astype is needed to cast datetime to datetime64[D]
if getattr(dt, 'tzinfo', None) is not None:
i8 = tslib.pydt_to_i8(dt)
dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
dt = Timestamp(dt)
dt = np.datetime64(dt)
if dt.dtype.name != dtype:
dt = dt.astype(dtype)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
day64 = self._to_dt64(dt, 'datetime64[D]')
return np.is_busday(day64, busdaycal=self.calendar)
class CustomBusinessHour(BusinessHourMixin, SingleConstructorOffset):
"""
    DateOffset subclass representing possibly n custom business hours
    .. versionadded:: 0.18.1
"""
_prefix = 'CBH'
_anchor = 0
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
super(CustomBusinessHour, self).__init__(**kwds)
# used for moving to next businessday
if self.n >= 0:
nb_offset = 1
else:
nb_offset = -1
self.next_bday = CustomBusinessDay(n=nb_offset,
weekmask=weekmask,
holidays=holidays,
calendar=calendar)
self.kwds['weekmask'] = self.next_bday.weekmask
self.kwds['holidays'] = self.next_bday.holidays
self.kwds['calendar'] = self.next_bday.calendar
class MonthOffset(SingleConstructorOffset):
_adjust_dst = True
@property
def name(self):
if self.isAnchored:
return self.rule_code
else:
return "%s-%s" % (self.rule_code, _int_to_month[self.n])
class MonthEnd(MonthOffset):
"""DateOffset of one month end"""
@apply_wraps
def apply(self, other):
n = self.n
_, days_in_month = tslib.monthrange(other.year, other.month)
if other.day != days_in_month:
other = other + relativedelta(months=-1, day=31)
if n <= 0:
n = n + 1
other = other + relativedelta(months=n, day=31)
return other
@apply_index_wraps
def apply_index(self, i):
shifted = tslib.shift_months(i.asi8, self.n, 'end')
return i._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
days_in_month = tslib.monthrange(dt.year, dt.month)[1]
return dt.day == days_in_month
_prefix = 'M'
class MonthBegin(MonthOffset):
"""DateOffset of one month at beginning"""
@apply_wraps
def apply(self, other):
n = self.n
if other.day > 1 and n <= 0: # then roll forward if n<=0
n += 1
return other + relativedelta(months=n, day=1)
@apply_index_wraps
def apply_index(self, i):
shifted = tslib.shift_months(i.asi8, self.n, 'start')
return i._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day == 1
_prefix = 'MS'
class SemiMonthOffset(DateOffset):
_adjust_dst = True
_default_day_of_month = 15
_min_day_of_month = 2
def __init__(self, n=1, day_of_month=None, normalize=False, **kwds):
if day_of_month is None:
self.day_of_month = self._default_day_of_month
else:
self.day_of_month = int(day_of_month)
if not self._min_day_of_month <= self.day_of_month <= 27:
raise ValueError('day_of_month must be '
'{}<=day_of_month<=27, got {}'.format(
self._min_day_of_month, self.day_of_month))
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.kwds['day_of_month'] = self.day_of_month
@classmethod
def _from_name(cls, suffix=None):
return cls(day_of_month=suffix)
@property
def rule_code(self):
suffix = '-{}'.format(self.day_of_month)
return self._prefix + suffix
@apply_wraps
def apply(self, other):
n = self.n
if not self.onOffset(other):
_, days_in_month = tslib.monthrange(other.year, other.month)
if 1 < other.day < self.day_of_month:
other += relativedelta(day=self.day_of_month)
if n > 0:
# rollforward so subtract 1
n -= 1
elif self.day_of_month < other.day < days_in_month:
other += relativedelta(day=self.day_of_month)
if n < 0:
# rollforward in the negative direction so add 1
n += 1
elif n == 0:
n = 1
return self._apply(n, other)
def _apply(self, n, other):
"""Handle specific apply logic for child classes"""
raise AbstractMethodError(self)
@apply_index_wraps
def apply_index(self, i):
# determine how many days away from the 1st of the month we are
days_from_start = i.to_perioddelta('M').asi8
delta = Timedelta(days=self.day_of_month - 1).value
# get boolean array for each element before the day_of_month
before_day_of_month = days_from_start < delta
# get boolean array for each element after the day_of_month
after_day_of_month = days_from_start > delta
# determine the correct n for each date in i
roll = self._get_roll(i, before_day_of_month, after_day_of_month)
        # isolate the time since it will be stripped away on the next line
time = i.to_perioddelta('D')
# apply the correct number of months
i = (i.to_period('M') + (roll // 2)).to_timestamp()
# apply the correct day
i = self._apply_index_days(i, roll)
return i + time
def _get_roll(self, i, before_day_of_month, after_day_of_month):
"""Return an array with the correct n for each date in i.
The roll array is based on the fact that i gets rolled back to
the first day of the month.
"""
raise AbstractMethodError(self)
def _apply_index_days(self, i, roll):
"""Apply the correct day for each date in i"""
raise AbstractMethodError(self)
class SemiMonthEnd(SemiMonthOffset):
"""
    Two DateOffsets per month, repeating on the last
day of the month and day_of_month.
.. versionadded:: 0.19.0
Parameters
----------
n: int
normalize : bool, default False
    day_of_month: int, {1, 2, ..., 27}, default 15
"""
_prefix = 'SM'
_min_day_of_month = 1
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
_, days_in_month = tslib.monthrange(dt.year, dt.month)
return dt.day in (self.day_of_month, days_in_month)
def _apply(self, n, other):
# if other.day is not day_of_month move to day_of_month and update n
if other.day < self.day_of_month:
other += relativedelta(day=self.day_of_month)
if n > 0:
n -= 1
elif other.day > self.day_of_month:
other += relativedelta(day=self.day_of_month)
if n == 0:
n = 1
else:
n += 1
months = n // 2
day = 31 if n % 2 else self.day_of_month
return other + relativedelta(months=months, day=day)
def _get_roll(self, i, before_day_of_month, after_day_of_month):
n = self.n
is_month_end = i.is_month_end
if n > 0:
roll_end = np.where(is_month_end, 1, 0)
roll_before = np.where(before_day_of_month, n, n + 1)
roll = roll_end + roll_before
elif n == 0:
roll_after = np.where(after_day_of_month, 2, 0)
roll_before = np.where(~after_day_of_month, 1, 0)
roll = roll_before + roll_after
else:
roll = np.where(after_day_of_month, n + 2, n + 1)
return roll
def _apply_index_days(self, i, roll):
i += (roll % 2) * Timedelta(days=self.day_of_month).value
return i + Timedelta(days=-1)
class SemiMonthBegin(SemiMonthOffset):
"""
    Two DateOffsets per month, repeating on the first
day of the month and day_of_month.
.. versionadded:: 0.19.0
Parameters
----------
n: int
normalize : bool, default False
day_of_month: int, {2, 3,...,27}, default 15
"""
_prefix = 'SMS'
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day in (1, self.day_of_month)
def _apply(self, n, other):
# if other.day is not day_of_month move to day_of_month and update n
if other.day < self.day_of_month:
other += relativedelta(day=self.day_of_month)
if n == 0:
n = -1
else:
n -= 1
elif other.day > self.day_of_month:
other += relativedelta(day=self.day_of_month)
if n == 0:
n = 1
elif n < 0:
n += 1
months = n // 2 + n % 2
day = 1 if n % 2 else self.day_of_month
return other + relativedelta(months=months, day=day)
def _get_roll(self, i, before_day_of_month, after_day_of_month):
n = self.n
is_month_start = i.is_month_start
if n > 0:
roll = np.where(before_day_of_month, n, n + 1)
elif n == 0:
roll_start = np.where(is_month_start, 0, 1)
roll_after = np.where(after_day_of_month, 1, 0)
roll = roll_start + roll_after
else:
roll_after = np.where(after_day_of_month, n + 2, n + 1)
roll_start = np.where(is_month_start, -1, 0)
roll = roll_after + roll_start
return roll
def _apply_index_days(self, i, roll):
return i + (roll % 2) * Timedelta(days=self.day_of_month - 1).value
class BusinessMonthEnd(MonthOffset):
"""DateOffset increments between business EOM dates"""
def isAnchored(self):
return (self.n == 1)
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
% 7) - 4, 0)
if n > 0 and not other.day >= lastBDay:
n = n - 1
elif n <= 0 and other.day > lastBDay:
n = n + 1
other = other + relativedelta(months=n, day=31)
if other.weekday() > 4:
other = other - BDay()
return other
_prefix = 'BM'
class BusinessMonthBegin(MonthOffset):
"""DateOffset of one business month at beginning"""
@apply_wraps
def apply(self, other):
n = self.n
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
if other.day > first and n <= 0:
# as if rolled forward already
n += 1
elif other.day < first and n > 0:
other = other + timedelta(days=first - other.day)
n -= 1
other = other + relativedelta(months=n)
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
result = datetime(other.year, other.month, first,
other.hour, other.minute,
other.second, other.microsecond)
return result
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
first_weekday, _ = tslib.monthrange(dt.year, dt.month)
if first_weekday == 5:
return dt.day == 3
elif first_weekday == 6:
return dt.day == 2
else:
return dt.day == 1
_prefix = 'BMS'
class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'CBM'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
self.m_offset = MonthEnd(n=1, normalize=normalize, **kwds)
self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self, other):
n = self.n
# First move to month offset
cur_mend = self.m_offset.rollforward(other)
# Find this custom month offset
cur_cmend = self.cbday.rollback(cur_mend)
# handle zero case. arbitrarily rollforward
if n == 0 and other != cur_cmend:
n += 1
if other < cur_cmend and n >= 1:
n -= 1
elif other > cur_cmend and n <= -1:
n += 1
new = cur_mend + n * self.m_offset
result = self.cbday.rollback(new)
return result
class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'CBMS'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
self.m_offset = MonthBegin(n=1, normalize=normalize, **kwds)
self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self, other):
n = self.n
dt_in = other
# First move to month offset
cur_mbegin = self.m_offset.rollback(dt_in)
# Find this custom month offset
cur_cmbegin = self.cbday.rollforward(cur_mbegin)
# handle zero case. arbitrarily rollforward
if n == 0 and dt_in != cur_cmbegin:
n += 1
if dt_in > cur_cmbegin and n <= -1:
n += 1
elif dt_in < cur_cmbegin and n >= 1:
n -= 1
new = cur_mbegin + n * self.m_offset
result = self.cbday.rollforward(new)
return result
class Week(DateOffset):
"""
Weekly offset
Parameters
----------
weekday : int, default None
Always generate specific day of week. 0 for Monday
"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds.get('weekday', None)
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self._inc = timedelta(weeks=1)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.weekday is not None)
@apply_wraps
def apply(self, other):
base = other
if self.weekday is None:
return other + self.n * self._inc
if self.n > 0:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
k = k - 1
other = other
for i in range(k):
other = other + self._inc
else:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
for i in range(-k):
other = other - self._inc
other = datetime(other.year, other.month, other.day,
base.hour, base.minute, base.second, base.microsecond)
return other
@apply_index_wraps
def apply_index(self, i):
if self.weekday is None:
return ((i.to_period('W') + self.n).to_timestamp() +
i.to_perioddelta('W'))
else:
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() == self.weekday
_prefix = 'W'
@property
def rule_code(self):
suffix = ''
if self.weekday is not None:
suffix = '-%s' % (_int_to_weekday[self.weekday])
return self._prefix + suffix
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
weekday = None
else:
weekday = _weekday_to_int[suffix]
return cls(weekday=weekday)
class WeekDay(object):
MON = 0
TUE = 1
WED = 2
THU = 3
FRI = 4
SAT = 5
SUN = 6
_int_to_weekday = {
WeekDay.MON: 'MON',
WeekDay.TUE: 'TUE',
WeekDay.WED: 'WED',
WeekDay.THU: 'THU',
WeekDay.FRI: 'FRI',
WeekDay.SAT: 'SAT',
WeekDay.SUN: 'SUN'
}
_weekday_to_int = dict((v, k) for k, v in _int_to_weekday.items())
class WeekOfMonth(DateOffset):
"""
Describes monthly dates like "the Tuesday of the 2nd week of each month"
Parameters
----------
n : int
week : {0, 1, 2, 3, ...}
0 is 1st week of month, 1 2nd week, etc.
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds['weekday']
self.week = kwds['week']
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
if self.week < 0 or self.week > 3:
raise ValueError('Week must be 0<=day<=3, got %d' %
self.week)
self.kwds = kwds
@apply_wraps
def apply(self, other):
base = other
offsetOfMonth = self.getOffsetOfMonth(other)
if offsetOfMonth > other:
if self.n > 0:
months = self.n - 1
else:
months = self.n
elif offsetOfMonth == other:
months = self.n
else:
if self.n > 0:
months = self.n
else:
months = self.n + 1
other = self.getOffsetOfMonth(
other + relativedelta(months=months, day=1))
other = datetime(other.year, other.month, other.day, base.hour,
base.minute, base.second, base.microsecond)
return other
def getOffsetOfMonth(self, dt):
w = Week(weekday=self.weekday)
d = datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo)
d = w.rollforward(d)
for i in range(self.week):
d = w.apply(d)
return d
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo)
return d == self.getOffsetOfMonth(dt)
@property
def rule_code(self):
return '%s-%d%s' % (self._prefix, self.week + 1,
_int_to_weekday.get(self.weekday, ''))
_prefix = 'WOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
# TODO: handle n here...
# only one digit weeks (1 --> week 0, 2 --> week 1, etc.)
week = int(suffix[0]) - 1
weekday = _weekday_to_int[suffix[1:]]
return cls(week=week, weekday=weekday)
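# Hedged usage sketch (not part of pandas itself): the "2nd Tuesday of each
# month" offset described in the WeekOfMonth docstring above. 2014-01-01 was
# a Wednesday, so the second Tuesday of January 2014 is the 14th. The function
# is never called here.
def _week_of_month_sketch():
    second_tue = WeekOfMonth(week=1, weekday=WeekDay.TUE)
    assert datetime(2014, 1, 1) + second_tue == Timestamp('2014-01-14')
    # Week(weekday=...) alone simply rolls to the next such weekday
    assert datetime(2014, 1, 1) + Week(weekday=WeekDay.MON) == Timestamp('2014-01-06')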
class LastWeekOfMonth(DateOffset):
"""
Describes monthly dates in last week of month like "the last Tuesday of
each month"
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
"""
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds['weekday']
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self.kwds = kwds
@apply_wraps
def apply(self, other):
offsetOfMonth = self.getOffsetOfMonth(other)
if offsetOfMonth > other:
if self.n > 0:
months = self.n - 1
else:
months = self.n
elif offsetOfMonth == other:
months = self.n
else:
if self.n > 0:
months = self.n
else:
months = self.n + 1
return self.getOffsetOfMonth(
other + relativedelta(months=months, day=1))
def getOffsetOfMonth(self, dt):
m = MonthEnd()
d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute,
dt.second, dt.microsecond, tzinfo=dt.tzinfo)
eom = m.rollforward(d)
w = Week(weekday=self.weekday)
return w.rollback(eom)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt == self.getOffsetOfMonth(dt)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_weekday.get(self.weekday, ''))
_prefix = 'LWOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
# TODO: handle n here...
weekday = _weekday_to_int[suffix]
return cls(weekday=weekday)
class QuarterOffset(DateOffset):
"""Quarter representation - doesn't call super"""
#: default month for __init__
_default_startingMonth = None
#: default month in _from_name
_from_name_startingMonth = None
_adjust_dst = True
# TODO: Consider combining QuarterOffset and YearOffset __init__ at some
# point
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds.get('startingMonth',
self._default_startingMonth)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['startingMonth'] = _month_to_int[suffix]
else:
if cls._from_name_startingMonth is not None:
kwargs['startingMonth'] = cls._from_name_startingMonth
return cls(**kwargs)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_month[self.startingMonth])
class BQuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
"""
_outputName = 'BusinessQuarterEnd'
_default_startingMonth = 3
# 'BQ'
_from_name_startingMonth = 12
_prefix = 'BQ'
@apply_wraps
def apply(self, other):
n = self.n
base = other
other = datetime(other.year, other.month, other.day,
other.hour, other.minute, other.second,
other.microsecond)
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
% 7) - 4, 0)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
monthsToGo = 0
if n > 0 and not (other.day >= lastBDay and monthsToGo == 0):
n = n - 1
elif n <= 0 and other.day > lastBDay and monthsToGo == 0:
n = n + 1
other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
other = tslib._localize_pydatetime(other, base.tzinfo)
if other.weekday() > 4:
other = other - BDay()
return other
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
modMonth = (dt.month - self.startingMonth) % 3
return BMonthEnd().onOffset(dt) and modMonth == 0
_int_to_month = tslib._MONTH_ALIASES
_month_to_int = dict((v, k) for k, v in _int_to_month.items())
# TODO: This is basically the same as BQuarterEnd
class BQuarterBegin(QuarterOffset):
_outputName = "BusinessQuarterBegin"
# I suspect this is wrong for *all* of them.
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'BQS'
@apply_wraps
def apply(self, other):
n = self.n
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
monthsSince = (other.month - self.startingMonth) % 3
if n <= 0 and monthsSince != 0: # make sure to roll forward so negate
monthsSince = monthsSince - 3
# roll forward if on same month later than first bday
if n <= 0 and (monthsSince == 0 and other.day > first):
n = n + 1
# pretend to roll back if on same month but before firstbday
elif n > 0 and (monthsSince == 0 and other.day < first):
n = n - 1
# get the first bday for result
other = other + relativedelta(months=3 * n - monthsSince)
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
result = datetime(other.year, other.month, first,
other.hour, other.minute, other.second,
other.microsecond)
return result
class QuarterEnd(QuarterOffset):
    """DateOffset increments between Quarter end dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...
"""
_outputName = 'QuarterEnd'
_default_startingMonth = 3
_prefix = 'Q'
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds.get('startingMonth', 3)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@apply_wraps
def apply(self, other):
n = self.n
other = datetime(other.year, other.month, other.day,
other.hour, other.minute, other.second,
other.microsecond)
wkday, days_in_month = tslib.monthrange(other.year, other.month)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
monthsToGo = 0
if n > 0 and not (other.day >= days_in_month and monthsToGo == 0):
n = n - 1
other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
return other
@apply_index_wraps
def apply_index(self, i):
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
modMonth = (dt.month - self.startingMonth) % 3
return MonthEnd().onOffset(dt) and modMonth == 0
class QuarterBegin(QuarterOffset):
_outputName = 'QuarterBegin'
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'QS'
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, other.month)
monthsSince = (other.month - self.startingMonth) % 3
if n <= 0 and monthsSince != 0:
# make sure you roll forward, so negate
monthsSince = monthsSince - 3
if n <= 0 and (monthsSince == 0 and other.day > 1):
# after start, so come back an extra period as if rolled forward
n = n + 1
other = other + relativedelta(months=3 * n - monthsSince, day=1)
return other
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1
# freq_month = self.startingMonth
freqstr = 'Q-%s' % (_int_to_month[freq_month],)
return self._beg_apply_index(i, freqstr)
class YearOffset(DateOffset):
"""DateOffset that just needs a month"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.month = kwds.get('month', self._default_month)
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
DateOffset.__init__(self, n=n, normalize=normalize, **kwds)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['month'] = _month_to_int[suffix]
return cls(**kwargs)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_month[self.month])
class BYearEnd(YearOffset):
"""DateOffset increments between business EOM dates"""
_outputName = 'BusinessYearEnd'
_default_month = 12
_prefix = 'BA'
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, self.month)
lastBDay = (days_in_month -
max(((wkday + days_in_month - 1) % 7) - 4, 0))
years = n
if n > 0:
if (other.month < self.month or
(other.month == self.month and other.day < lastBDay)):
years -= 1
elif n <= 0:
if (other.month > self.month or
(other.month == self.month and other.day > lastBDay)):
years += 1
other = other + relativedelta(years=years)
_, days_in_month = tslib.monthrange(other.year, self.month)
result = datetime(other.year, self.month, days_in_month,
other.hour, other.minute, other.second,
other.microsecond)
if result.weekday() > 4:
result = result - BDay()
return result
class BYearBegin(YearOffset):
"""DateOffset increments between business year begin dates"""
_outputName = 'BusinessYearBegin'
_default_month = 1
_prefix = 'BAS'
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, self.month)
first = _get_firstbday(wkday)
years = n
if n > 0: # roll back first for positive n
if (other.month < self.month or
(other.month == self.month and other.day < first)):
years -= 1
elif n <= 0: # roll forward
if (other.month > self.month or
(other.month == self.month and other.day > first)):
years += 1
# set first bday for result
other = other + relativedelta(years=years)
wkday, days_in_month = tslib.monthrange(other.year, self.month)
first = _get_firstbday(wkday)
return datetime(other.year, self.month, first, other.hour,
other.minute, other.second, other.microsecond)
class YearEnd(YearOffset):
"""DateOffset increments between calendar year ends"""
_default_month = 12
_prefix = 'A'
@apply_wraps
def apply(self, other):
def _increment(date):
if date.month == self.month:
_, days_in_month = tslib.monthrange(date.year, self.month)
if date.day != days_in_month:
year = date.year
else:
year = date.year + 1
elif date.month < self.month:
year = date.year
else:
year = date.year + 1
_, days_in_month = tslib.monthrange(year, self.month)
return datetime(year, self.month, days_in_month,
date.hour, date.minute, date.second,
date.microsecond)
def _decrement(date):
year = date.year if date.month > self.month else date.year - 1
_, days_in_month = tslib.monthrange(year, self.month)
return datetime(year, self.month, days_in_month,
date.hour, date.minute, date.second,
date.microsecond)
def _rollf(date):
if date.month != self.month or\
date.day < tslib.monthrange(date.year, date.month)[1]:
date = _increment(date)
return date
n = self.n
result = other
if n > 0:
while n > 0:
result = _increment(result)
n -= 1
elif n < 0:
while n < 0:
result = _decrement(result)
n += 1
else:
# n == 0, roll forward
result = _rollf(result)
return result
@apply_index_wraps
def apply_index(self, i):
# convert month anchor to annual period tuple
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
wkday, days_in_month = tslib.monthrange(dt.year, self.month)
return self.month == dt.month and dt.day == days_in_month
class YearBegin(YearOffset):
"""DateOffset increments between calendar year begin dates"""
_default_month = 1
_prefix = 'AS'
@apply_wraps
def apply(self, other):
def _increment(date, n):
year = date.year + n - 1
if date.month >= self.month:
year += 1
return datetime(year, self.month, 1, date.hour, date.minute,
date.second, date.microsecond)
def _decrement(date, n):
year = date.year + n + 1
if date.month < self.month or (date.month == self.month and
date.day == 1):
year -= 1
return datetime(year, self.month, 1, date.hour, date.minute,
date.second, date.microsecond)
def _rollf(date):
if (date.month != self.month) or date.day > 1:
date = _increment(date, 1)
return date
n = self.n
result = other
if n > 0:
result = _increment(result, n)
elif n < 0:
result = _decrement(result, n)
else:
# n == 0, roll forward
result = _rollf(result)
return result
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.month == 1 else self.month - 1
freqstr = 'A-%s' % (_int_to_month[freq_month],)
return self._beg_apply_index(i, freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.month == self.month and dt.day == 1
class FY5253(DateOffset):
"""
Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar.
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
such as retail, manufacturing and parking industry.
For more information see:
http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'RE'
_suffix_prefix_last = 'L'
_suffix_prefix_nearest = 'N'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds['startingMonth']
self.weekday = kwds["weekday"]
self.variation = kwds["variation"]
self.kwds = kwds
if self.n == 0:
raise ValueError('N cannot be 0')
if self.variation not in ["nearest", "last"]:
raise ValueError('%s is not a valid variation' % self.variation)
if self.variation == "nearest":
weekday_offset = weekday(self.weekday)
self._rd_forward = relativedelta(weekday=weekday_offset)
self._rd_backward = relativedelta(weekday=weekday_offset(-1))
else:
self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday)
def isAnchored(self):
return self.n == 1 \
and self.startingMonth is not None \
and self.weekday is not None
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
dt = datetime(dt.year, dt.month, dt.day)
year_end = self.get_year_end(dt)
if self.variation == "nearest":
# We have to check the year end of "this" cal year AND the previous
return year_end == dt or \
self.get_year_end(dt - relativedelta(months=1)) == dt
else:
return year_end == dt
@apply_wraps
def apply(self, other):
n = self.n
prev_year = self.get_year_end(
datetime(other.year - 1, self.startingMonth, 1))
cur_year = self.get_year_end(
datetime(other.year, self.startingMonth, 1))
next_year = self.get_year_end(
datetime(other.year + 1, self.startingMonth, 1))
prev_year = tslib._localize_pydatetime(prev_year, other.tzinfo)
cur_year = tslib._localize_pydatetime(cur_year, other.tzinfo)
next_year = tslib._localize_pydatetime(next_year, other.tzinfo)
if n > 0:
if other == prev_year:
year = other.year - 1
elif other == cur_year:
year = other.year
elif other == next_year:
year = other.year + 1
elif other < prev_year:
year = other.year - 1
n -= 1
elif other < cur_year:
year = other.year
n -= 1
elif other < next_year:
year = other.year + 1
n -= 1
else:
assert False
result = self.get_year_end(
datetime(year + n, self.startingMonth, 1))
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second,
other.microsecond)
return result
else:
n = -n
if other == prev_year:
year = other.year - 1
elif other == cur_year:
year = other.year
elif other == next_year:
year = other.year + 1
elif other > next_year:
year = other.year + 1
n -= 1
elif other > cur_year:
year = other.year
n -= 1
elif other > prev_year:
year = other.year - 1
n -= 1
else:
assert False
result = self.get_year_end(
datetime(year - n, self.startingMonth, 1))
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second,
other.microsecond)
return result
def get_year_end(self, dt):
if self.variation == "nearest":
return self._get_year_end_nearest(dt)
else:
return self._get_year_end_last(dt)
def get_target_month_end(self, dt):
target_month = datetime(
dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
next_month_first_of = target_month + relativedelta(months=+1)
return next_month_first_of + relativedelta(days=-1)
def _get_year_end_nearest(self, dt):
target_date = self.get_target_month_end(dt)
if target_date.weekday() == self.weekday:
return target_date
else:
forward = target_date + self._rd_forward
backward = target_date + self._rd_backward
if forward - target_date < target_date - backward:
return forward
else:
return backward
def _get_year_end_last(self, dt):
current_year = datetime(
dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
return current_year + self._offset_lwom
@property
def rule_code(self):
suffix = self.get_rule_code_suffix()
return "%s-%s" % (self._get_prefix(), suffix)
def _get_prefix(self):
return self._prefix
def _get_suffix_prefix(self):
if self.variation == "nearest":
return self._suffix_prefix_nearest
else:
return self._suffix_prefix_last
def get_rule_code_suffix(self):
return '%s-%s-%s' % (self._get_suffix_prefix(),
_int_to_month[self.startingMonth],
_int_to_weekday[self.weekday])
@classmethod
    def _parse_suffix(cls, variation_code, startingMonth_code, weekday_code):
        if variation_code == "N":
            variation = "nearest"
        elif variation_code == "L":
            variation = "last"
        else:
            raise ValueError(
                "Unable to parse variation_code: %s" % (variation_code,))
startingMonth = _month_to_int[startingMonth_code]
weekday = _weekday_to_int[weekday_code]
return {
"weekday": weekday,
"startingMonth": startingMonth,
"variation": variation,
}
@classmethod
def _from_name(cls, *args):
return cls(**cls._parse_suffix(*args))
class FY5253Quarter(DateOffset):
"""
DateOffset increments between business quarter dates
for 52-53 week fiscal year (also known as a 4-4-5 calendar).
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
such as retail, manufacturing and parking industry.
For more information see:
http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
    qtr_with_extra_week : The quarter number that gets the leap (i.e. 14th)
        week when one is needed. {1, 2, 3, 4}
    variation : str
        {"nearest", "last"} for "NearestEndMonth" or "LastOfMonth"
"""
_prefix = 'REQ'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.qtr_with_extra_week = kwds["qtr_with_extra_week"]
self.kwds = kwds
if self.n == 0:
raise ValueError('N cannot be 0')
self._offset = FY5253(
startingMonth=kwds['startingMonth'],
weekday=kwds["weekday"],
variation=kwds["variation"])
def isAnchored(self):
return self.n == 1 and self._offset.isAnchored()
@apply_wraps
def apply(self, other):
base = other
n = self.n
if n > 0:
while n > 0:
if not self._offset.onOffset(other):
qtr_lens = self.get_weeks(other)
start = other - self._offset
else:
start = other
qtr_lens = self.get_weeks(other + self._offset)
for weeks in qtr_lens:
start += relativedelta(weeks=weeks)
if start > other:
other = start
n -= 1
break
else:
n = -n
while n > 0:
if not self._offset.onOffset(other):
qtr_lens = self.get_weeks(other)
end = other + self._offset
else:
end = other
qtr_lens = self.get_weeks(other)
for weeks in reversed(qtr_lens):
end -= relativedelta(weeks=weeks)
if end < other:
other = end
n -= 1
break
other = datetime(other.year, other.month, other.day,
base.hour, base.minute, base.second, base.microsecond)
return other
def get_weeks(self, dt):
ret = [13] * 4
year_has_extra_week = self.year_has_extra_week(dt)
if year_has_extra_week:
ret[self.qtr_with_extra_week - 1] = 14
return ret
def year_has_extra_week(self, dt):
if self._offset.onOffset(dt):
prev_year_end = dt - self._offset
next_year_end = dt
else:
next_year_end = dt + self._offset
prev_year_end = dt - self._offset
week_in_year = (next_year_end - prev_year_end).days / 7
return week_in_year == 53
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if self._offset.onOffset(dt):
return True
next_year_end = dt - self._offset
qtr_lens = self.get_weeks(dt)
current = next_year_end
for qtr_len in qtr_lens[0:4]:
current += relativedelta(weeks=qtr_len)
if dt == current:
return True
return False
@property
def rule_code(self):
suffix = self._offset.get_rule_code_suffix()
return "%s-%s" % (self._prefix,
"%s-%d" % (suffix, self.qtr_with_extra_week))
@classmethod
def _from_name(cls, *args):
return cls(**dict(FY5253._parse_suffix(*args[:-1]),
qtr_with_extra_week=int(args[-1])))
class Easter(DateOffset):
"""
DateOffset for the Easter holiday using
logic defined in dateutil. Right now uses
the revised method which is valid in years
1583-4099.
"""
_adjust_dst = True
def __init__(self, n=1, **kwds):
super(Easter, self).__init__(n, **kwds)
@apply_wraps
def apply(self, other):
currentEaster = easter(other.year)
currentEaster = datetime(
currentEaster.year, currentEaster.month, currentEaster.day)
currentEaster = tslib._localize_pydatetime(currentEaster, other.tzinfo)
# NOTE: easter returns a datetime.date so we have to convert to type of
# other
if self.n >= 0:
if other >= currentEaster:
new = easter(other.year + self.n)
else:
new = easter(other.year + self.n - 1)
else:
if other > currentEaster:
new = easter(other.year + self.n + 1)
else:
new = easter(other.year + self.n)
new = datetime(new.year, new.month, new.day, other.hour,
other.minute, other.second, other.microsecond)
return new
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return date(dt.year, dt.month, dt.day) == easter(dt.year)
# ---------------------------------------------------------------------
# Ticks
def _tick_comp(op):
def f(self, other):
return op(self.delta, other.delta)
return f
class Tick(SingleConstructorOffset):
_inc = Timedelta(microseconds=1000)
__gt__ = _tick_comp(operator.gt)
__ge__ = _tick_comp(operator.ge)
__lt__ = _tick_comp(operator.lt)
__le__ = _tick_comp(operator.le)
__eq__ = _tick_comp(operator.eq)
__ne__ = _tick_comp(operator.ne)
def __add__(self, other):
if isinstance(other, Tick):
if type(self) == type(other):
return type(self)(self.n + other.n)
else:
return _delta_to_tick(self.delta + other.delta)
elif isinstance(other, ABCPeriod):
return other + self
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
def __eq__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta == other.delta
else:
return DateOffset.__eq__(self, other)
# This is identical to DateOffset.__hash__, but has to be redefined here
# for Python 3, because we've redefined __eq__.
def __hash__(self):
return hash(self._params())
def __ne__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta != other.delta
else:
return DateOffset.__ne__(self, other)
@property
def delta(self):
return self.n * self._inc
@property
def nanos(self):
return _delta_to_nanoseconds(self.delta)
def apply(self, other):
# Timestamp can handle tz and nano sec, thus no need to use apply_wraps
if isinstance(other, (datetime, np.datetime64, date)):
return as_timestamp(other) + self
if isinstance(other, timedelta):
return other + self.delta
elif isinstance(other, type(self)):
return type(self)(self.n + other.n)
else:
raise ApplyTypeError('Unhandled type: %s' % type(other).__name__)
_prefix = 'undefined'
def isAnchored(self):
return False
def _delta_to_tick(delta):
if delta.microseconds == 0:
if delta.seconds == 0:
return Day(delta.days)
else:
seconds = delta.days * 86400 + delta.seconds
if seconds % 3600 == 0:
return Hour(seconds / 3600)
elif seconds % 60 == 0:
return Minute(seconds / 60)
else:
return Second(seconds)
else:
nanos = _delta_to_nanoseconds(delta)
if nanos % 1000000 == 0:
return Milli(nanos // 1000000)
elif nanos % 1000 == 0:
return Micro(nanos // 1000)
else: # pragma: no cover
return Nano(nanos)
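# Illustrative sketch -- hedged examples of the mapping implemented above:
#
#   _delta_to_tick(timedelta(days=2))          # -> Day(2)
#   _delta_to_tick(timedelta(minutes=90))      # -> Minute(90)
#   _delta_to_tick(timedelta(microseconds=5))  # -> Micro(5)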
_delta_to_nanoseconds = tslib._delta_to_nanoseconds
class Day(Tick):
_inc = Timedelta(days=1)
_prefix = 'D'
class Hour(Tick):
_inc = Timedelta(hours=1)
_prefix = 'H'
class Minute(Tick):
_inc = Timedelta(minutes=1)
_prefix = 'T'
class Second(Tick):
_inc = Timedelta(seconds=1)
_prefix = 'S'
class Milli(Tick):
_inc = Timedelta(milliseconds=1)
_prefix = 'L'
class Micro(Tick):
_inc = Timedelta(microseconds=1)
_prefix = 'U'
class Nano(Tick):
_inc = Timedelta(nanoseconds=1)
_prefix = 'N'
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
def _get_firstbday(wkday):
"""
    wkday is the weekday of the first day of the month, i.e. the first element
    of monthrange(year, month). If the month starts on a Saturday or Sunday,
    shift the first business day forward to the following Monday.
"""
first = 1
if wkday == 5: # on Saturday
first = 3
elif wkday == 6: # on Sunday
first = 2
return first
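# Illustrative sketch -- a hedged example: August 2015 starts on a Saturday
# (weekday 5), so the first business day is Monday the 3rd:
#
#   wkday, _ = tslib.monthrange(2015, 8)   # wkday == 5
#   _get_firstbday(wkday)                  # -> 3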
def generate_range(start=None, end=None, periods=None,
offset=BDay(), time_rule=None):
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
objects to represent time increments
Parameters
----------
start : datetime (default None)
end : datetime (default None)
periods : int, optional
time_rule : (legacy) name of DateOffset object to be used, optional
Corresponds with names expected by tseries.frequencies.get_offset
Notes
-----
* This method is faster for generating weekdays than dateutil.rrule
* At least two of (start, end, periods) must be specified.
* If both start and end are specified, the returned dates will
satisfy start <= date <= end.
* If both time_rule and offset are specified, time_rule supersedes offset.
Returns
-------
dates : generator object
"""
if time_rule is not None:
from pandas.tseries.frequencies import get_offset
offset = get_offset(time_rule)
start = to_datetime(start)
end = to_datetime(end)
if start and not offset.onOffset(start):
start = offset.rollforward(start)
elif end and not offset.onOffset(end):
end = offset.rollback(end)
if periods is None and end < start:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
cur = start
if offset.n >= 0:
while cur <= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date <= cur:
raise ValueError('Offset %s did not increment date' % offset)
cur = next_date
else:
while cur >= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date >= cur:
raise ValueError('Offset %s did not decrement date' % offset)
cur = next_date
prefix_mapping = dict((offset._prefix, offset) for offset in [
YearBegin, # 'AS'
YearEnd, # 'A'
BYearBegin, # 'BAS'
BYearEnd, # 'BA'
BusinessDay, # 'B'
BusinessMonthBegin, # 'BMS'
BusinessMonthEnd, # 'BM'
BQuarterEnd, # 'BQ'
BQuarterBegin, # 'BQS'
BusinessHour, # 'BH'
CustomBusinessDay, # 'C'
CustomBusinessMonthEnd, # 'CBM'
CustomBusinessMonthBegin, # 'CBMS'
CustomBusinessHour, # 'CBH'
MonthEnd, # 'M'
MonthBegin, # 'MS'
SemiMonthEnd, # 'SM'
SemiMonthBegin, # 'SMS'
Week, # 'W'
Second, # 'S'
Minute, # 'T'
Micro, # 'U'
QuarterEnd, # 'Q'
QuarterBegin, # 'QS'
Milli, # 'L'
Hour, # 'H'
Day, # 'D'
WeekOfMonth, # 'WOM'
FY5253,
FY5253Quarter,
])
prefix_mapping['N'] = Nano
| mit |
Sw3m/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter2_MorePyMC/separation_plot.py | 86 | 1494 | # separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
    p: the proportions/probabilities; may be an n x M matrix representing M models.
    y: the 0/1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
    try:
        M = p.shape[1]
    except IndexError:
        # p was passed as a 1-D array: treat it as a single model
        p = p.reshape( n, 1 )
        M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
| mit |
XueqingLin/tensorflow | tensorflow/contrib/factorization/python/ops/kmeans_test.py | 23 | 14710 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
import tensorflow as tf
from tensorflow.python.platform import benchmark
FLAGS = tf.app.flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(np.random.rand(num_centers, num_dims).astype(np.float32) *
center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(np.random.randn(num_points, num_dims).astype(np.float32) *
max_offset)
return (centers[assignments] + offsets,
assignments,
np.add.reduce(offsets * offsets, 1))
class KMeansTest(tf.test.TestCase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 10000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
self.kmeans = tf.contrib.factorization.KMeansClustering(
self.num_centers,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
use_mini_batch=self.use_mini_batch,
config=self.config(14),
random_seed=12)
@staticmethod
def config(tf_random_seed):
return tf.contrib.learn.RunConfig(tf_random_seed=tf_random_seed)
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
def test_clusters(self):
kmeans = self.kmeans
kmeans.fit(x=self.points, steps=1, batch_size=8)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape),
[self.num_centers, self.num_dims])
def test_fit(self):
if self.batch_size != self.num_points:
# TODO(agarwal): Doesn't work with mini-batch.
return
kmeans = self.kmeans
kmeans.fit(x=self.points,
steps=1, batch_size=self.batch_size)
score1 = kmeans.score(x=self.points)
kmeans.fit(x=self.points,
steps=15 * self.num_points // self.batch_size,
batch_size=self.batch_size)
score2 = kmeans.score(x=self.points)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.batch_size != self.num_points:
# TODO(agarwal): Doesn't work with mini-batch.
return
kmeans = tf.contrib.factorization.KMeansClustering(
self.num_centers,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
use_mini_batch=self.use_mini_batch,
config=tf.contrib.learn.RunConfig(tf_random_seed=14),
random_seed=12)
kmeans.fit(x=self.points,
# Force it to train forever until the monitor stops it.
steps=None,
batch_size=self.batch_size,
relative_tolerance=1e-4)
score = kmeans.score(x=self.points)
self.assertNear(self.true_score, score, self.true_score * 0.005)
def test_infer(self):
kmeans = self.kmeans
kmeans.fit(x=self.points, steps=10, batch_size=128)
clusters = kmeans.clusters()
# Make a small test set
points, true_assignments, true_offsets = make_random_points(clusters, 10)
# Test predict
assignments = kmeans.predict(points, batch_size=self.batch_size)
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(points, batch_size=128)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(points, batch_size=128)
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) -
2 * np.dot(points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_fit_with_cosine_distance(self):
# Create points on y=x and y=1.5x lines to check the cosine similarity.
# Note that euclidean distance will give different results in this case.
points = np.array(
[[9, 9], [0.5, 0.5], [10, 15], [0.4, 0.6]], dtype=np.float32)
# true centers are the unit vectors on lines y=x and y=1.5x
true_centers = np.array(
[[0.70710678, 0.70710678], [0.5547002, 0.83205029]], dtype=np.float32)
kmeans = tf.contrib.factorization.KMeansClustering(
2,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(2),
random_seed=12)
kmeans.fit(x=points, steps=10, batch_size=4)
centers = normalize(kmeans.clusters())
self.assertAllClose(np.sort(centers, axis=0),
np.sort(true_centers, axis=0))
def test_transform_with_cosine_distance(self):
points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2],
[0.1, 2.5], [0.2, 2], [0.1, 3], [0.2, 4]], dtype=np.float32)
true_centers = [normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[0:4, :], axis=0,
keepdims=True))[0]]
kmeans = tf.contrib.factorization.KMeansClustering(
2,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(5))
kmeans.fit(x=points, steps=50, batch_size=8)
centers = normalize(kmeans.clusters())
self.assertAllClose(np.sort(centers, axis=0),
np.sort(true_centers, axis=0),
atol=1e-2)
true_transform = 1 - cosine_similarity(points, centers)
transform = kmeans.transform(points, batch_size=8)
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict_with_cosine_distance(self):
points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2],
[0.1, 2.5], [0.2, 2], [0.1, 3], [0.2, 4]], dtype=np.float32)
true_centers = np.array(
[normalize(np.mean(normalize(points)[0:4, :],
axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :],
axis=0,
keepdims=True))[0]], dtype=np.float32)
true_assignments = [0] * 4 + [1] * 4
true_score = len(points) - np.tensordot(normalize(points),
true_centers[true_assignments])
kmeans = tf.contrib.factorization.KMeansClustering(
2,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(3))
kmeans.fit(x=points, steps=30, batch_size=8)
centers = normalize(kmeans.clusters())
self.assertAllClose(np.sort(centers, axis=0),
np.sort(true_centers, axis=0), atol=1e-2)
assignments = kmeans.predict(points, batch_size=8)
self.assertAllClose(centers[assignments],
true_centers[true_assignments], atol=1e-2)
score = kmeans.score(points, batch_size=8)
self.assertAllClose(score, true_score, atol=1e-2)
def test_predict_with_cosine_distance_and_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array([[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3],
[-3.1, -3.2], [-2.8, -3.], [-2.9, -3.1], [-3., -3.1],
[-3., -3.1], [-3.2, -3.], [-3., -3.]], dtype=np.float32)
true_centers = np.array(
[normalize(np.mean(normalize(points)[0:2, :], axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[2:4, :], axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0]], dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(normalize(points),
true_centers[true_assignments])
kmeans = tf.contrib.factorization.KMeansClustering(
3,
initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(3))
kmeans.fit(x=points, steps=30, batch_size=12)
centers = normalize(kmeans.clusters())
self.assertAllClose(sorted(centers.tolist()),
sorted(true_centers.tolist()),
atol=1e-2)
assignments = kmeans.predict(points, batch_size=12)
self.assertAllClose(centers[assignments],
true_centers[true_assignments], atol=1e-2)
score = kmeans.score(points, batch_size=12)
self.assertAllClose(score, true_score, atol=1e-2)
def test_fit_raise_if_num_clusters_larger_than_num_points_random_init(self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError('less'):
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=3, initial_clusters=tf.contrib.factorization.RANDOM_INIT)
kmeans.fit(x=points, steps=10, batch_size=8)
def test_fit_raise_if_num_clusters_larger_than_num_points_kmeans_plus_plus(
self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError(AssertionError):
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=3,
initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT)
kmeans.fit(x=points, steps=10, batch_size=8)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self, dimension=50, num_clusters=50, points_per_cluster=10000,
center_norm=500, cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(self.num_clusters, dimension,
center_norm=center_norm)
self.points, _, scores = make_random_points(self.centers, self.num_points,
max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(iters=num_iters, wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = tf.contrib.factorization.KMeansClustering(
self.num_clusters,
initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
tf_kmeans.fit(x=self.points, batch_size=self.num_points, steps=50,
relative_tolerance=1e-6)
_ = tf_kmeans.clusters()
scores.append(tf_kmeans.score(self.points))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(n_clusters=self.num_clusters,
init='k-means++',
max_iter=50, n_init=1, tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
has2k1/plotnine | plotnine/geoms/geom_vline.py | 1 | 2652 | from warnings import warn
import pandas as pd
import matplotlib.lines as mlines
from ..utils import make_iterable, SIZE_FACTOR, order_as_mapping_data
from ..exceptions import PlotnineWarning
from ..doctools import document
from ..mapping import aes
from .geom import geom
from .geom_segment import geom_segment
@document
class geom_vline(geom):
"""
Vertical line
{usage}
Parameters
----------
{common_parameters}
"""
DEFAULT_AES = {'color': 'black', 'linetype': 'solid',
'size': 0.5, 'alpha': 1}
REQUIRED_AES = {'xintercept'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
'na_rm': False, 'inherit_aes': False}
def __init__(self, mapping=None, data=None, **kwargs):
mapping, data = order_as_mapping_data(mapping, data)
xintercept = kwargs.pop('xintercept', None)
if xintercept is not None:
if mapping:
warn("The 'xintercept' parameter has overridden "
"the aes() mapping.", PlotnineWarning)
data = pd.DataFrame({'xintercept': make_iterable(xintercept)})
mapping = aes(xintercept='xintercept')
kwargs['show_legend'] = False
geom.__init__(self, mapping, data, **kwargs)
def draw_panel(self, data, panel_params, coord, ax, **params):
"""
Plot all groups
"""
ranges = coord.backtransform_range(panel_params)
data['x'] = data['xintercept']
data['xend'] = data['xintercept']
data['y'] = ranges.y[0]
data['yend'] = ranges.y[1]
data = data.drop_duplicates()
for _, gdata in data.groupby('group'):
gdata.reset_index(inplace=True)
geom_segment.draw_group(gdata, panel_params,
coord, ax, **params)
@staticmethod
def draw_legend(data, da, lyr):
"""
Draw a vertical line in the box
Parameters
----------
data : dataframe
da : DrawingArea
lyr : layer
Returns
-------
out : DrawingArea
"""
x = [0.5 * da.width] * 2
y = [0, da.height]
data['size'] *= SIZE_FACTOR
key = mlines.Line2D(x,
y,
alpha=data['alpha'],
linestyle=data['linetype'],
linewidth=data['size'],
color=data['color'],
solid_capstyle='butt',
antialiased=False)
da.add_artist(key)
return da
| gpl-2.0 |
bavaria95/sentiment | sentiment.py | 1 | 2120 | import pandas as pd
import sys, os
import math, nltk, re, pickle
file_to_read = r"train.tsv"
train_data = pd.read_table(file_to_read)
size = train_data.count()[0]
#? lower case phrases
train_data['Unigrams'] = train_data['Phrase'].str.split()
bigrams = []
for i in range(size):
t = list(nltk.bigrams(train_data['Phrase'][i].split()))
t = list(map(lambda x: x[0] + ' ' + x[1], t))
bigrams.append(t)
train_data['Bigrams'] = bigrams
A = []
combs = [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (2, 3), (2, 4)]
for i in range(5):
A.append(train_data[train_data['Sentiment'] == i].count()[0])
matrix = []
for index, number, phraseid, phrase, sentiment, unigrams, bigrams in train_data[0:1].itertuples():  # NOTE: only the first row here; drop the [0:1] slice to process all phrases
V = 0
V_uni = 0
V_bi = 0
features = [0] * len(combs)
for uni in unigrams:
uni = re.escape(uni)
C_uni = len(re.findall('\W?' + uni + '\W?', phrase))
features_tmp = []
for pair in combs:
N_t = train_data[(train_data['Sentiment'] == pair[0]) & (train_data['Phrase'].str.contains('\W' + uni + '\W'))].count()[0]
P_t = train_data[(train_data['Sentiment'] == pair[1]) & (train_data['Phrase'].str.contains('\W' + uni + '\W'))].count()[0]
if N_t != 0 and P_t != 0:
V_uni = C_uni * math.log((A[pair[0]] * P_t) / float((A[pair[1]] * N_t)), 2)
elif N_t != 0:
V_uni = C_uni * math.log(A[pair[0]] / float((A[pair[1]] * N_t)), 2)
elif P_t != 0:
V_uni = C_uni * math.log((A[pair[0]] * P_t) / float(A[pair[1]]), 2)
else:
V_uni = 0
print ('unigram = ' + uni + ', pair = ' + str(pair), ', C = ' + str(C_uni) + ', N = ' + str(A[pair[0]]) + ', P = ' + str(A[pair[1]]) + ', Pt = ' + str(P_t) + ', Nt = ' + str(N_t) + ', V = ' + str(V_uni))
features_tmp.append(V_uni)
print ('features_tmp = ' + str(features_tmp))
print ('features = ' + str(features))
print ('\n')
features_tmp = map(lambda x: 1.0 * x / len(unigrams), features_tmp)
features = [(x + y) for (x, y) in zip(features, features_tmp)]
matrix.append(features)
# print number
###for bigrams
with open('unigrams.dat', 'wb') as f:
pickle.dump(matrix, f) | mit |
dudulianangang/vps | Fie2d.py | 1 | 2019 | import sdf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
n0 = 1.8e20
me = 9.1e-31
qe = 1.6e-19
ep = 8.9e-12
c = 3e8
wp = np.sqrt(n0*qe*qe/me/ep)
ld = c/wp
e0 = me*c*wp/qe
b0 = e0/c
nx = 3500
ny = 3500
lx = 3500
ly = 3500
fs = 16
grid_min_x = 0+100
grid_max_x = nx-100
grid_min_y = 0+100
grid_max_y = ny-100
Gx = np.linspace(0,lx,nx)
Gy = np.linspace(0,ly,ny)
gx = Gx[grid_min_x:grid_max_x]
gy = Gy[grid_min_y:grid_max_y]
jetcmap = plt.cm.get_cmap("rainbow", 9) #generate a jet map with 10 values
jet_vals = jetcmap(np.arange(9)) #extract those values as an array
jet_vals[0] = [1.0, 1, 1.0, 1] #change the first value
newcmap = mpl.colors.LinearSegmentedColormap.from_list("newjet", jet_vals)
for i in range(7):
file = '/Volumes/yaowp2016/njbx0.02/'
ii = (i)*5
fname = file+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
# Ex = datafile.Electric_Field_Ex.data/e0
# Ey = datafile.Electric_Field_Ey.data/e0
# Ez = datafile.Electric_Field_Ez.data/e0
# Bx = datafile.Magnetic_Field_Bx.data/b0
# By = datafile.Magnetic_Field_By.data/b0
Bz = datafile.Magnetic_Field_Bz.data/b0
# ex = Ex[grid_min_x:grid_max_x,grid_min_y:grid_max_y]
# ey = Ey[grid_min_x:grid_max_x,grid_min_y:grid_max_y]
# ez = Ez[grid_min_x:grid_max_x,grid_min_y:grid_max_y]
# bx = Bx[grid_min_x:grid_max_x,grid_min_y:grid_max_y]
# by = By[grid_min_x:grid_max_x,grid_min_y:grid_max_y]
bz = Bz[grid_min_x:grid_max_x,grid_min_y:grid_max_y]
plt.figure(figsize=(11,5))
fig = plt.imshow(bz.transpose(),
extent=[min(gx),max(gx),min(gy),max(gy)],aspect='1',
# cmap=newcmap,
origin="lower",
interpolation='nearest',
vmin = -2,
vmax = 2,
)
fig.set_cmap('bwr')
plt.margins(0,0)
plt.title('Bz, normed by {:.3e} T'.format(b0))
# plt.title('Ez, normed by {:.3e} V/m'.format(e0))
plt.xlabel('x/$\lambda_e$',fontsize=fs)
plt.ylabel('y/$\lambda_e$',fontsize=fs)
plt.colorbar()
plt.savefig(file+'/plots/bzn_'+str(ii)+'.png',bbox_inches='tight') # n means normalized
plt.close() | apache-2.0 |
tyc85/nwsdr-3.6.3-dsc | gr-filter/examples/channelize.py | 13 | 6790 | #!/usr/bin/env python
#
# Copyright 2009,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
from gnuradio import filter
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 1000 # initial sampling rate
self._M = M = 9 # Number of channels to channelize
self._ifs = M*self._fs # initial sampling rate
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = gr.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
for i in xrange(len(freqs)):
f = freqs[i] + (M/2-M+i+1)*self._fs
self.signals.append(gr.sig_source_c(self._ifs, gr.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = gr.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(gr.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
lukiz/pacman | pacman.py | 2 | 7656 | # -*- coding: utf-8 -*-
import os
import pika
from pika import exceptions
from influxdb import InfluxDBClient
import time
import toml
import json
from datetime import datetime
import pymssql
import pandas.io.sql as psql
from ZabbixReader import ZabbixReader
CONF_FILE = '/conf/conf.toml'
## MS_SQL QUERY CONFIG
NODEBs_CONF = ''
NODEB_CONF_READ_INTERVAL_h = 24
NODEB_CONF_READ_TIMESTAMP = 0
def read_conf(CONF_FILE):
with open(CONF_FILE) as conffile:
conf = toml.loads(conffile.read())
return conf
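# Illustrative sketch -- a hedged example of the conf.toml layout this script
# appears to expect; the keys are inferred from how CONF is accessed below and
# the real file may differ:
#
#   [input.zabbix]
#   url = "http://zabbix.example.com"
#   user = "reader"
#   password = "secret"
#   repeat_time = 300
#
#   [[input.zabbix.group]]
#   gname = "LTE-nodes"
#
#   [output.influxdb]
#   url = "influx.example.com"
#   port = 8086
#   username = "writer"
#   password = "secret"
#   database = "metrics"
#   precision = "s"
#
#   # [input.rabbitmq] / [output.rabbitmq] take username, password, vhost,
#   # queue_name, ssl, optional ssl_options, and a [[...host]] list of
#   # {url, port} tables.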
def transform_func(body):
    # transform the fetched data (e.g. enrich it with MSSQL metadata)
    # before forwarding it to the configured outputs
global CONF
global NODEBs_CONF
global NODEB_CONF_READ_TIMESTAMP
if 'transform' in CONF.viewkeys():
if 'mssql' in CONF['transform'].viewkeys():
body = json.loads(body)
## query MSSQL if config is empty or only once per given NODEB_CONF_READ_INTERVAL_h
if(NODEBs_CONF=='' or ((time.time() - NODEB_CONF_READ_TIMESTAMP) > (60*60*NODEB_CONF_READ_INTERVAL_h))):
NODEB_CONF_READ_TIMESTAMP = time.time()
MSSQL_CFG = CONF['transform']['mssql']
mssql_conn = pymssql.connect(host=MSSQL_CFG['url'], user=MSSQL_CFG['username'], password=MSSQL_CFG['password'], database=MSSQL_CFG['database'], as_dict=True, charset='utf8')
sql = """SELECT IP_ADDRESS,ADDRESS, SITE4G_NAME, ZONE, GEOHASH AS geohash FROM objects"""
config_data = psql.read_sql(sql, mssql_conn, index_col='IP_ADDRESS')
NODEBs_CONF = config_data.T.to_dict()
mssql_conn.close()
for object_x in body:
try:
# print 'try to add tags..'
object_x['tags'].update(NODEBs_CONF[object_x['tags']['host']])
except:
# print '....except'
object_x['tags'].update({'ADDRESS': 'Null','SITE4G_NAME': 'Null', 'ZONE': 0,'geohash': 'Null'})
body = json.dumps(body)
return body
def send_to_rabbit(body):
global CONF
print ('Rabbit output conn params', CONF['output']['rabbitmq'])
if 'ssl_options' in CONF['output']['rabbitmq'].viewkeys():
sslOptions = CONF['output']['rabbitmq']['ssl_options']
else:
sslOptions = ''
connectionParams = []
rmqaccess = CONF['output']['rabbitmq']
credentials = pika.PlainCredentials(rmqaccess['username'], rmqaccess['password'])
for rabbit in CONF['output']['rabbitmq']['host']:
connection_x = pika.ConnectionParameters(rabbit['url']
,rabbit['port']
,rmqaccess['vhost']
,credentials
,ssl = rmqaccess['ssl']
,ssl_options = sslOptions)
connectionParams.append(connection_x)
connection = connect_to_rabbit_node(connectionParams)
channel = connection.channel()
channel.basic_publish(exchange='',routing_key=rmqaccess['queue_name'],body=body,properties=pika.BasicProperties(delivery_mode = 2))
print (" [x] Sent to Rabbit %r" % body)
connection.close()
def send_to_influx(body):
global CONF
    print ('InfluxDB output conn params', CONF['output']['influxdb'])
INFLUX_CFG = CONF['output']['influxdb']
client = InfluxDBClient(INFLUX_CFG['url'], INFLUX_CFG['port'], INFLUX_CFG['username'], INFLUX_CFG['password'], INFLUX_CFG['database'])
#client.create_database(INFLUX_CFG['database'])
body = json.loads(body)
if not isinstance(body,list):
tmp_list = []
tmp_list.append(body)
body = tmp_list
#print("Write points to InfluxDB: {0}".format(body))
res = client.write_points(body, time_precision=INFLUX_CFG['precision'])
# callback invoked for each received message; transforms the payload and
# forwards it to the configured outputs (InfluxDB and/or RabbitMQ)
def callback(ch, method, properties, body):
#print(" [x] Received %r" % body)
body = transform_func(body)
global CONF
if 'influxdb' in CONF['output'].viewkeys():
send_to_influx(body)
if 'rabbitmq' in CONF['output'].viewkeys():
send_to_rabbit(body)
ch.basic_ack(delivery_tag = method.delivery_tag)
def connect_to_rabbit_node(connectionParams):
    # connect to the first available RabbitMQ node from the list
i=-1
while True:
try:
# id of rabbit node on the list
i=(i+1)%len(connectionParams)
# Step #1: Connect to RabbitMQ using the default parameters
connection = pika.BlockingConnection(connectionParams[i])
return connection
except exceptions.AMQPConnectionError as e:
print "Rabbitmq Connection error " + e.message
pass
except:
print "Unexpected error:"
raise
# connect to the input RabbitMQ broker and start consuming messages
def mRabbitMQConnector():
global CONF
print ('Rabbit Input conn params', CONF['input']['rabbitmq'])
if 'ssl_options' in CONF['input']['rabbitmq'].viewkeys():
sslOptions = CONF['input']['rabbitmq']['ssl_options']
else:
sslOptions = ''
connectionParams = []
rmqaccess = CONF['input']['rabbitmq']
credentials = pika.PlainCredentials(rmqaccess['username'], rmqaccess['password'])
for rabbit in CONF['input']['rabbitmq']['host']:
connection_x = pika.ConnectionParameters(rabbit['url']
,rabbit['port']
,rmqaccess['vhost']
,credentials
,ssl = rmqaccess['ssl']
,ssl_options = sslOptions)
connectionParams.append(connection_x)
connection = connect_to_rabbit_node(connectionParams)
channel = connection.channel()
    # optional queue declaration
# channel.queue_declare(queue=CONF['input']['rabbitmq'][0]['queue_name'], durable=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback,
queue=rmqaccess['queue_name'])
channel.start_consuming()
print(' [*] BYE')
def getZabbixData():
""" Get data from Zabbix based on configuration, send to RMq """
# zabbix connection
global CONF
ZAB_CONF = CONF['input']['zabbix']
zab_url = ZAB_CONF['url']
zab_user = ZAB_CONF['user']
zab_password = ZAB_CONF['password']
zr = ZabbixReader(zab_url, zab_user, zab_password)
# iterate through groups defined in conf and send data to RMq
try:
for group in CONF['input']['zabbix']['group']:
stats = zr.get_stats_from_gname_json(group['gname'])
send_to_rabbit(stats)
except Exception as e:
print e
pass
# iterate through hosts defined in conf and send data to RMq
try:
for host in CONF['input']['zabbix']['host']:
stats = zr.get_stats_from_hname_json(host['hname'])
send_to_rabbit(stats)
except Exception as e:
print e
pass
def mZabbixConnector():
global CONF
CONF = read_conf(CONF_FILE)
sleep_time = CONF['input']['zabbix']['repeat_time']
while True:
getZabbixData()
print "Sleep for %d seconds ..zzz..zzz..zzz..." % sleep_time
time.sleep(sleep_time)
if __name__ == '__main__':
global CONF
CONF = read_conf(CONF_FILE)
# check for zabbix input configuration
try:
CONF['input']['zabbix']
except KeyError as e:
print "No valid input for zabbix in file: [%s] " % CONF_FILE
else:
print "pacman in Zabbix mode"
mZabbixConnector()
# check for rabbit input configuration
try:
CONF['input']['rabbitmq']
except KeyError as e:
print "No valid input for rabbit in file: [%s] " % CONF_FILE
else:
print "pacman in Rabbit mode"
mRabbitMQConnector()
| mit |
bert9bert/statsmodels | statsmodels/tsa/statespace/tests/test_models.py | 2 | 6374 | """
Tests for miscellaneous models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from statsmodels.tsa.statespace import mlemodel
from statsmodels import datasets
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
from .results import results_sarimax
current_path = os.path.dirname(os.path.abspath(__file__))
class Intercepts(mlemodel.MLEModel):
"""
Test class for observation and state intercepts (which usually don't
get tested in other models).
"""
def __init__(self, endog, **kwargs):
k_states = 3
k_posdef = 3
super(Intercepts, self).__init__(
endog, k_states=k_states, k_posdef=k_posdef, **kwargs)
self['design'] = np.eye(3)
self['obs_cov'] = np.eye(3)
self['transition'] = np.eye(3)
self['selection'] = np.eye(3)
self['state_cov'] = np.eye(3)
self.initialize_approximate_diffuse()
@property
def param_names(self):
return ['d.1', 'd.2', 'd.3', 'c.1', 'c.2', 'c.3']
@property
def start_params(self):
return np.arange(6)
def update(self, params, **kwargs):
params = super(Intercepts, self).update(params, **kwargs)
self['obs_intercept'] = params[:3]
self['state_intercept'] = params[3:]
class TestIntercepts(object):
@classmethod
def setup_class(cls, which='mixed', **kwargs):
# Results
path = current_path + os.sep + 'results/results_intercepts_R.csv'
cls.desired = pd.read_csv(path)
# Data
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
freq='QS')
obs = dta[['realgdp', 'realcons', 'realinv']].copy()
obs = obs / obs.std()
if which == 'all':
obs.ix[:50, :] = np.nan
obs.ix[119:130, :] = np.nan
elif which == 'partial':
obs.ix[0:50, 0] = np.nan
obs.ix[119:130, 0] = np.nan
elif which == 'mixed':
obs.ix[0:50, 0] = np.nan
obs.ix[19:70, 1] = np.nan
obs.ix[39:90, 2] = np.nan
obs.ix[119:130, 0] = np.nan
obs.ix[119:130, 2] = np.nan
mod = Intercepts(obs, **kwargs)
cls.params = np.arange(6) + 1
cls.model = mod
cls.results = mod.smooth(cls.params, return_ssm=True)
# Calculate the determinant of the covariance matrices (for easy
# comparison to other languages without having to store 2-dim arrays)
cls.results.det_scaled_smoothed_estimator_cov = (
np.zeros((1, cls.model.nobs)))
cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_disturbance_cov = (
np.zeros((1, cls.model.nobs)))
for i in range(cls.model.nobs):
cls.results.det_scaled_smoothed_estimator_cov[0, i] = (
np.linalg.det(
cls.results.scaled_smoothed_estimator_cov[:, :, i]))
cls.results.det_predicted_state_cov[0, i] = np.linalg.det(
cls.results.predicted_state_cov[:, :, i+1])
cls.results.det_smoothed_state_cov[0, i] = np.linalg.det(
cls.results.smoothed_state_cov[:, :, i])
cls.results.det_smoothed_state_disturbance_cov[0, i] = (
np.linalg.det(
cls.results.smoothed_state_disturbance_cov[:, :, i]))
def test_loglike(self):
assert_allclose(np.sum(self.results.llf_obs), -7924.03893566)
def test_scaled_smoothed_estimator(self):
assert_allclose(
self.results.scaled_smoothed_estimator.T,
self.desired[['r1', 'r2', 'r3']]
)
def test_scaled_smoothed_estimator_cov(self):
assert_allclose(
self.results.det_scaled_smoothed_estimator_cov.T,
self.desired[['detN']]
)
def test_forecasts(self):
assert_allclose(
self.results.forecasts.T,
self.desired[['m1', 'm2', 'm3']]
)
def test_forecasts_error(self):
assert_allclose(
self.results.forecasts_error.T,
self.desired[['v1', 'v2', 'v3']]
)
def test_forecasts_error_cov(self):
assert_allclose(
self.results.forecasts_error_cov.diagonal(),
self.desired[['F1', 'F2', 'F3']]
)
def test_predicted_states(self):
assert_allclose(
self.results.predicted_state[:, 1:].T,
self.desired[['a1', 'a2', 'a3']]
)
def test_predicted_states_cov(self):
assert_allclose(
self.results.det_predicted_state_cov.T,
self.desired[['detP']]
)
def test_smoothed_states(self):
assert_allclose(
self.results.smoothed_state.T,
self.desired[['alphahat1', 'alphahat2', 'alphahat3']]
)
def test_smoothed_states_cov(self):
assert_allclose(
self.results.det_smoothed_state_cov.T,
self.desired[['detV']]
)
def test_smoothed_forecasts(self):
assert_allclose(
self.results.smoothed_forecasts.T,
self.desired[['muhat1', 'muhat2', 'muhat3']]
)
def test_smoothed_state_disturbance(self):
assert_allclose(
self.results.smoothed_state_disturbance.T,
self.desired[['etahat1', 'etahat2', 'etahat3']]
)
def test_smoothed_state_disturbance_cov(self):
assert_allclose(
self.results.det_smoothed_state_disturbance_cov.T,
self.desired[['detVeta']]
)
def test_smoothed_measurement_disturbance(self):
assert_allclose(
self.results.smoothed_measurement_disturbance.T,
self.desired[['epshat1', 'epshat2', 'epshat3']], atol=1e-9
)
def test_smoothed_measurement_disturbance_cov(self):
assert_allclose(
self.results.smoothed_measurement_disturbance_cov.diagonal(),
self.desired[['Veps1', 'Veps2', 'Veps3']]
)
| bsd-3-clause |
DGrady/pandas | scripts/touchup_gh_issues.py | 6 | 1060 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import OrderedDict
import sys
import re
"""
Reads stdin, replaces all occurrences of '#num' or 'GH #num' with
links to the corresponding GitHub issue, and dumps the issue anchors
before the next section header.
"""
pat = "((?:\s*GH\s*)?)#(\d{3,4})([^_]|$)?"
rep_pat = r"\1GH\2_\3"
anchor_pat = ".. _GH{id}: https://github.com/pandas-dev/pandas/issues/{id}"
section_pat = "^pandas\s[\d\.]+\s*$"
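# Worked example of the substitution above (hypothetical changelog line):
#   input : "Fixed groupby segfault (#1234)"
#   output: "Fixed groupby segfault (GH1234_)"
# and, before the next "pandas x.y.z" section header, main() emits the anchor:
#   .. _GH1234: https://github.com/pandas-dev/pandas/issues/1234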
def main():
issues = OrderedDict()
while True:
line = sys.stdin.readline()
if not line:
break
if re.search(section_pat, line):
for id in issues:
print(anchor_pat.format(id=id).rstrip())
if issues:
print("\n")
issues = OrderedDict()
for m in re.finditer(pat, line):
id = m.group(2)
if id not in issues:
issues[id] = True
print(re.sub(pat, rep_pat, line).rstrip())
pass
if __name__ == "__main__":
main()
| bsd-3-clause |
Sunhick/ThinkStats2 | code/brfss.py | 69 | 4708 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import sys
import pandas
import numpy as np
import thinkstats2
import thinkplot
def Summarize(df, column, title):
"""Print summary statistics male, female and all."""
items = [
('all', df[column]),
('male', df[df.sex == 1][column]),
('female', df[df.sex == 2][column]),
]
print(title)
print('key\tn\tmean\tvar\tstd\tcv')
for key, series in items:
mean, var = series.mean(), series.var()
std = math.sqrt(var)
cv = std / mean
t = key, len(series), mean, var, std, cv
print('%s\t%d\t%4.2f\t%4.2f\t%4.2f\t%4.4f' % t)
def CleanBrfssFrame(df):
"""Recodes BRFSS variables.
df: DataFrame
"""
# clean age
df.age.replace([7, 9], float('NaN'), inplace=True)
# clean height
df.htm3.replace([999], float('NaN'), inplace=True)
# clean weight
df.wtkg2.replace([99999], float('NaN'), inplace=True)
df.wtkg2 /= 100.0
# clean weight a year ago
df.wtyrago.replace([7777, 9999], float('NaN'), inplace=True)
df['wtyrago'] = df.wtyrago.apply(lambda x: x/2.2 if x < 9000 else x-9000)
def ReadBrfss(filename='CDBRFS08.ASC.gz', compression='gzip', nrows=None):
"""Reads the BRFSS data.
filename: string
compression: string
nrows: int number of rows to read, or None for all
returns: DataFrame
"""
var_info = [
('age', 101, 102, int),
('sex', 143, 143, int),
('wtyrago', 127, 130, int),
('finalwt', 799, 808, int),
('wtkg2', 1254, 1258, int),
('htm3', 1251, 1253, int),
]
columns = ['name', 'start', 'end', 'type']
variables = pandas.DataFrame(var_info, columns=columns)
variables.end += 1
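    # var_info lists 1-based, inclusive column positions from the BRFSS codebook;
    # adding 1 to `end` converts them to half-open ranges before they are handed
    # to FixedWidthVariables(index_base=1).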
dct = thinkstats2.FixedWidthVariables(variables, index_base=1)
df = dct.ReadFixedWidth(filename, compression=compression, nrows=nrows)
CleanBrfssFrame(df)
return df
def MakeNormalModel(weights):
"""Plots a CDF with a Normal model.
weights: sequence
"""
cdf = thinkstats2.Cdf(weights, label='weights')
mean, var = thinkstats2.TrimmedMeanVar(weights)
std = math.sqrt(var)
print('n, mean, std', len(weights), mean, std)
xmin = mean - 4 * std
xmax = mean + 4 * std
xs, ps = thinkstats2.RenderNormalCdf(mean, std, xmin, xmax)
thinkplot.Plot(xs, ps, label='model', linewidth=4, color='0.8')
thinkplot.Cdf(cdf)
def MakeNormalPlot(weights):
"""Generates a normal probability plot of birth weights.
weights: sequence
"""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = math.sqrt(var)
xs = [-5, 5]
xs, ys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(xs, ys, color='0.8', label='model')
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='weights')
def MakeFigures(df):
"""Generates CDFs and normal prob plots for weights and log weights."""
weights = df.wtkg2.dropna()
log_weights = np.log10(weights)
# plot weights on linear and log scales
thinkplot.PrePlot(cols=2)
MakeNormalModel(weights)
thinkplot.Config(xlabel='adult weight (kg)', ylabel='CDF')
thinkplot.SubPlot(2)
MakeNormalModel(log_weights)
thinkplot.Config(xlabel='adult weight (log10 kg)')
thinkplot.Save(root='brfss_weight')
# make normal probability plots on linear and log scales
thinkplot.PrePlot(cols=2)
MakeNormalPlot(weights)
thinkplot.Config(xlabel='z', ylabel='weights (kg)')
thinkplot.SubPlot(2)
MakeNormalPlot(log_weights)
thinkplot.Config(xlabel='z', ylabel='weights (log10 kg)')
thinkplot.Save(root='brfss_weight_normal')
def main(script, nrows=1000):
"""Tests the functions in this module.
script: string script name
"""
thinkstats2.RandomSeed(17)
nrows = int(nrows)
df = ReadBrfss(nrows=nrows)
MakeFigures(df)
Summarize(df, 'htm3', 'Height (cm):')
Summarize(df, 'wtkg2', 'Weight (kg):')
Summarize(df, 'wtyrago', 'Weight year ago (kg):')
if nrows == 1000:
assert(df.age.value_counts()[40] == 28)
assert(df.sex.value_counts()[2] == 668)
assert(df.wtkg2.value_counts()[90.91] == 49)
assert(df.wtyrago.value_counts()[160/2.2] == 49)
assert(df.htm3.value_counts()[163] == 103)
assert(df.finalwt.value_counts()[185.870345] == 13)
print('%s: All tests passed.' % script)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
bsipocz/statsmodels | statsmodels/sandbox/tsa/fftarma.py | 30 | 16438 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 19:53:25 2009
Author: josef-pktd
generate arma sample using fft with all the lfilter it looks slow
to get the ma representation first
apply arma filter (in ar representation) to time series to get white noise
but seems slow to be useful for fast estimation for nobs=10000
change/check: instead of using marep, use fft-transform of ar and ma
separately, use the ratio; check that the theory is correct and the example works
DONE : feels much faster than lfilter
-> use for estimation of ARMA
-> use pade (scipy.misc) approximation to get starting polynomial
from autocorrelation (is autocorrelation of AR(p) related to marep?)
check if pade is fast, not for larger arrays ?
maybe pade doesn't do the right thing for this, not tried yet
scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)
raises LinAlgError: singular matrix
also doesn't have roots inside unit circle ??
-> even without initialization, it might be fast for estimation
-> how do I enforce stationarity and invertibility,
need helper function
get function drop imag if close to zero from numpy/scipy source, where?
"""
from __future__ import print_function
import numpy as np
import numpy.fft as fft
#import scipy.fftpack as fft
from scipy import signal
#from try_var_convolve import maxabs
from statsmodels.sandbox.archive.linalg_decomp_1 import OneTimeProperty
from statsmodels.tsa.arima_process import ArmaProcess
#trying to convert old experiments to a class
class ArmaFft(ArmaProcess):
'''fft tools for arma processes
    This class contains several methods that provide the same or similar
    results, in order to try out and test different implementations.
Notes
-----
TODO:
check whether we don't want to fix maxlags, and create new instance if
maxlag changes. usage for different lengths of timeseries ?
or fix frequency and length for fft
check default frequencies w, terminology norw n_or_w
some ffts are currently done without padding with zeros
returns for spectral density methods needs checking, is it always the power
spectrum hw*hw.conj()
normalization of the power spectrum, spectral density: not checked yet, for
example no variance of underlying process is used
'''
def __init__(self, ar, ma, n):
#duplicates now that are subclassing ArmaProcess
super(ArmaFft, self).__init__(ar, ma)
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.nobs = n
#could make the polynomials into cached attributes
self.arpoly = np.polynomial.Polynomial(ar)
self.mapoly = np.polynomial.Polynomial(ma)
self.nar = len(ar) #1d only currently
self.nma = len(ma)
def padarr(self, arr, maxlag, atend=True):
'''pad 1d array with zeros at end to have length maxlag
function that is a method, no self used
Parameters
----------
arr : array_like, 1d
array that will be padded with zeros
maxlag : int
length of array after padding
atend : boolean
If True (default), then the zeros are added to the end, otherwise
to the front of the array
Returns
-------
arrp : ndarray
zero-padded array
Notes
-----
This is mainly written to extend coefficient arrays for the lag-polynomials.
It returns a copy.
'''
if atend:
return np.r_[arr, np.zeros(maxlag-len(arr))]
else:
return np.r_[np.zeros(maxlag-len(arr)), arr]
def pad(self, maxlag):
'''construct AR and MA polynomials that are zero-padded to a common length
Parameters
----------
maxlag : int
new length of lag-polynomials
Returns
-------
ar : ndarray
extended AR polynomial coefficients
ma : ndarray
            extended MA polynomial coefficients
'''
arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]
mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]
return arpad, mapad
def fftar(self, n=None):
'''Fourier transform of AR polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftar : ndarray
fft of zero-padded ar polynomial
'''
if n is None:
n = len(self.ar)
return fft.fft(self.padarr(self.ar, n))
def fftma(self, n):
'''Fourier transform of MA polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
        fftma : ndarray
            fft of zero-padded ma polynomial
'''
if n is None:
            n = len(self.ma)
return fft.fft(self.padarr(self.ma, n))
#@OneTimeProperty # not while still debugging things
def fftarma(self, n=None):
'''Fourier transform of ARMA polynomial, zero-padded at end to n
The Fourier transform of the ARMA process is calculated as the ratio
of the fft of the MA polynomial divided by the fft of the AR polynomial.
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftarma : ndarray
fft of zero-padded arma polynomial
'''
if n is None:
n = self.nobs
return (self.fftma(n) / self.fftar(n))
def spd(self, npos):
'''raw spectral density, returns Fourier transform
n is number of points in positive spectrum, the actual number of points
is twice as large. different from other spd methods with fft
'''
n = npos
w = fft.fftfreq(2*n) * 2 * np.pi
hw = self.fftarma(2*n) #not sure, need to check normalization
#return (hw*hw.conj()).real[n//2-1:] * 0.5 / np.pi #doesn't show in plot
return (hw*hw.conj()).real * 0.5 / np.pi, w
def spdshift(self, n):
'''power spectral density using fftshift
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
mapadded = self.padarr(self.ma, n)
arpadded = self.padarr(self.ar, n)
hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
#return np.abs(spd)[n//2-1:]
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(n//2-1, None, None)
#return (hw*hw.conj()).real[wslice], w[wslice]
return (hw*hw.conj()).real, w
def spddirect(self, n):
'''power spectral density using padding to length n done by fft
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
#abs looks wrong
hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(None, n//2, None)
#return (np.abs(hw)**2)[wslice], w[wslice]
return (np.abs(hw)**2) * 0.5/np.pi, w
def _spddirect2(self, n):
'''this looks bad, maybe with an fftshift
'''
#size = s1+s2-1
hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)
/ fft.fft(np.r_[self.ar[::-1],self.ar], n))
return (hw*hw.conj()) #.real[n//2-1:]
def spdroots(self, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
'''
return self.spdroots_(self.arroots, self.maroots, w)
def spdroots_(self, arroots, maroots, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
Parameters
----------
arroots : ndarray
roots of ar (denominator) lag-polynomial
maroots : ndarray
roots of ma (numerator) lag-polynomial
w : array_like
frequencies for which spd is calculated
Notes
-----
this should go into a function
'''
w = np.atleast_2d(w).T
cosw = np.cos(w)
#Greene 5th edt. p626, section 20.2.7.a.
maroots = 1./maroots
arroots = 1./arroots
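        #identity: 1 + z**2 - 2*z*cos(w) = (1 - z*exp(-1j*w)) * (1 - z*exp(1j*w)),
        #so the products below give |ma(exp(-1j*w))|**2 / |ar(exp(-1j*w))|**2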
num = 1 + maroots**2 - 2* maroots * cosw
den = 1 + arroots**2 - 2* arroots * cosw
#print 'num.shape, den.shape', num.shape, den.shape
hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1) #or use expsumlog
return np.squeeze(hw), w.squeeze()
def spdpoly(self, w, nma=50):
'''spectral density from MA polynomial representation for ARMA process
References
----------
Cochrane, section 8.3.3
'''
mpoly = np.polynomial.Polynomial(self.arma2ma(nma))
hw = mpoly(np.exp(1j * w))
spd = np.real_if_close(hw * hw.conj() * 0.5/np.pi)
return spd, w
def filter(self, x):
'''
filter a timeseries with the ARMA filter
padding with zero is missing, in example I needed the padding to get
initial conditions identical to direct filter
Initial filtered observations differ from filter2 and signal.lfilter, but
at end they are the same.
See Also
--------
tsa.filters.fftconvolve
'''
n = x.shape[0]
        if n == self.nobs:
            fftarma = self.fftarma(n)
else:
fftarma = self.fftma(n) / self.fftar(n)
tmpfft = fftarma * fft.fft(x)
return fft.ifft(tmpfft)
def filter2(self, x, pad=0):
'''filter a time series using fftconvolve3 with ARMA filter
padding of x currently works only if x is 1d
in example it produces same observations at beginning as lfilter even
without padding.
TODO: this returns 1 additional observation at the end
'''
from statsmodels.tsa.filters import fftconvolve3
if not pad:
pass
elif pad == 'auto':
#just guessing how much padding
x = self.padarr(x, x.shape[0] + 2*(self.nma+self.nar), atend=False)
else:
x = self.padarr(x, x.shape[0] + int(pad), atend=False)
return fftconvolve3(x, self.ma, self.ar)
def acf2spdfreq(self, acovf, nfreq=100, w=None):
'''
not really a method
just for comparison, not efficient for large n or long acf
        this is also used similarly in tsa.stattools.periodogram with window
'''
if w is None:
w = np.linspace(0, np.pi, nfreq)[:, None]
nac = len(acovf)
hw = 0.5 / np.pi * (acovf[0] +
2 * (acovf[1:] * np.cos(w*np.arange(1,nac))).sum(1))
return hw
def invpowerspd(self, n):
'''autocovariance from spectral density
scaling is correct, but n needs to be large for numerical accuracy
maybe padding with zero in fft would be faster
without slicing it returns 2-sided autocovariance with fftshift
>>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
>>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
'''
hw = self.fftarma(n)
return np.real_if_close(fft.ifft(hw*hw.conj()), tol=200)[:n]
def spdmapoly(self, w, twosided=False):
'''ma only, need division for ar, use LagPolynomial
'''
if w is None:
            w = np.linspace(0, np.pi, self.nobs)
return 0.5 / np.pi * self.mapoly(np.exp(w*1j))
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
        rvs = self.generate_sample(nsample=nobs, burnin=500)
acf = self.acf(nacf)[:nacf] #TODO: check return length
pacf = self.pacf(nacf)
w = np.linspace(0, np.pi, nfreq)
spdr, wr = self.spdroots(w)
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
ax.plot(rvs)
ax.set_title('Random Sample \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,2)
ax.plot(acf)
        ax.set_title('Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,3)
ax.plot(wr, spdr)
ax.set_title('Power Spectrum \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,4)
ax.plot(pacf)
ax.set_title('Partial Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))
return fig
def spdar1(ar, w):
if np.ndim(ar) == 0:
rho = ar
else:
rho = -ar[1]
return 0.5 / np.pi /(1 + rho*rho - 2 * rho * np.cos(w))
if __name__ == '__main__':
def maxabs(x,y):
return np.max(np.abs(x-y))
nobs = 200 #10000
ar = [1, 0.0]
ma = [1, 0.0]
ar2 = np.zeros(nobs)
ar2[:2] = [1, -0.9]
uni = np.zeros(nobs)
uni[0]=1.
#arrep = signal.lfilter(ma, ar, ar2)
#marep = signal.lfilter([1],arrep, uni)
# same faster:
arcomb = np.convolve(ar, ar2, mode='same')
marep = signal.lfilter(ma,arcomb, uni) #[len(ma):]
print(marep[:10])
mafr = fft.fft(marep)
rvs = np.random.normal(size=nobs)
datafr = fft.fft(rvs)
y = fft.ifft(mafr*datafr)
print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]],rowvar=0))
arrep = signal.lfilter([1],marep, uni)
print(arrep[:20]) # roundtrip to ar
arfr = fft.fft(arrep)
yfr = fft.fft(y)
x = fft.ifft(arfr*yfr).real #imag part is e-15
# the next two are equal, roundtrip works
print(x[:5])
print(rvs[:5])
print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]],rowvar=0))
# ARMA filter using fft with ratio of fft of ma/ar lag polynomial
# seems much faster than using lfilter
#padding, note arcomb is already full length
arcombp = np.zeros(nobs)
arcombp[:len(arcomb)] = arcomb
map_ = np.zeros(nobs) #rename: map was shadowing builtin
map_[:len(ma)] = ma
ar0fr = fft.fft(arcombp)
ma0fr = fft.fft(map_)
y2 = fft.ifft(ma0fr/ar0fr*datafr)
#the next two are (almost) equal in real part, almost zero but different in imag
print(y2[:10])
print(y[:10])
print(maxabs(y, y2)) # from chfdiscrete
#1.1282071239631782e-014
ar = [1, -0.4]
ma = [1, 0.2]
arma1 = ArmaFft([1, -0.5,0,0,0,00, -0.7, 0.3], [1, 0.8], nobs)
nfreq = nobs
w = np.linspace(0, np.pi, nfreq)
w2 = np.linspace(0, 2*np.pi, nfreq)
import matplotlib.pyplot as plt
plt.close('all')
plt.figure()
spd1, w1 = arma1.spd(2**10)
print(spd1.shape)
_ = plt.plot(spd1)
plt.title('spd fft complex')
plt.figure()
spd2, w2 = arma1.spdshift(2**10)
print(spd2.shape)
_ = plt.plot(w2, spd2)
plt.title('spd fft shift')
plt.figure()
spd3, w3 = arma1.spddirect(2**10)
print(spd3.shape)
_ = plt.plot(w3, spd3)
plt.title('spd fft direct')
plt.figure()
spd3b = arma1._spddirect2(2**10)
print(spd3b.shape)
_ = plt.plot(spd3b)
plt.title('spd fft direct mirrored')
plt.figure()
spdr, wr = arma1.spdroots(w)
print(spdr.shape)
plt.plot(w, spdr)
plt.title('spd from roots')
plt.figure()
spdar1_ = spdar1(arma1.ar, w)
print(spdar1_.shape)
_ = plt.plot(w, spdar1_)
plt.title('spd ar1')
plt.figure()
wper, spdper = arma1.periodogram(nfreq)
print(spdper.shape)
_ = plt.plot(w, spdper)
plt.title('periodogram')
startup = 1000
rvs = arma1.generate_sample(startup+10000)[startup:]
import matplotlib.mlab as mlb
plt.figure()
sdm, wm = mlb.psd(x)
print('sdm.shape', sdm.shape)
sdm = sdm.ravel()
plt.plot(wm, sdm)
plt.title('matplotlib')
from nitime.algorithms import LD_AR_est
#yule_AR_est(s, order, Nfreqs)
wnt, spdnt = LD_AR_est(rvs, 10, 512)
plt.figure()
print('spdnt.shape', spdnt.shape)
_ = plt.plot(spdnt.ravel())
print(spdnt[:10])
plt.title('nitime')
fig = plt.figure()
arma1.plot4(fig)
#plt.show()
| bsd-3-clause |
robbymeals/scikit-learn | sklearn/cluster/birch.py | 207 | 22706 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
    expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
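    # node1_closer[i] is True when subcluster i lies nearer to the first of the
    # two farthest-apart subclusters; this drives the redistribution below.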
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
next_leaf. Useful only if is_leaf is True.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of the ``init_centroids_``.
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
        # sufficient to update only the underlying arrays.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
        Centroid of the subcluster. Prevents recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
        Child Node of the subcluster. Once a given _CFNode is set as the child
        of this _CFSubcluster, it is stored in ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
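        # sq_radius simplifies to new_ss / new_n - new_norm, i.e. the mean
        # squared distance of the merged samples from their centroid.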
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
    ends up in the subcluster of the leaf of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor, then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
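        # Minimal usage sketch (illustrative only; the array names are assumptions):
        #
        #     brc = Birch(n_clusters=3)
        #     for batch in np.array_split(X_train, 10):
        #         brc.partial_fit(batch)      # grow the CF tree incrementally
        #     brc.partial_fit()               # run only the global clustering step
        #     labels = brc.predict(X_train)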
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
| bsd-3-clause |
chuckchen/spark | python/pyspark/sql/context.py | 3 | 23888 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, _NoValue
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.udf import UDFRegistration # noqa: F401
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
    A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
.. deprecated:: 3.0.0
Use :func:`SparkSession.builder.getOrCreate()` instead.
Parameters
----------
sparkContext : :class:`SparkContext`
The :class:`SparkContext` backing this SQLContext.
sparkSession : :class:`SparkSession`
The :class:`SparkSession` around which this SQLContext wraps.
jsqlContext : optional
An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
        This is only for internal use.
Examples
--------
>>> from datetime import datetime
>>> from pyspark.sql import Row
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, 'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
_instantiatedContext = None
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
if sparkSession is None:
warnings.warn(
"Deprecated in 3.0.0. Use SparkSession.builder.getOrCreate() instead.",
DeprecationWarning)
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if (SQLContext._instantiatedContext is None
or SQLContext._instantiatedContext._sc._jsc is None):
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@property
def _conf(self):
"""Accessor for the JVM SQL-specific configurations"""
return self.sparkSession._jsparkSession.sessionState().conf()
@classmethod
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
.. versionadded:: 1.6.0
.. deprecated:: 3.0.0
Use :func:`SparkSession.builder.getOrCreate()` instead.
Parameters
----------
sc : :class:`SparkContext`
"""
warnings.warn(
"Deprecated in 3.0.0. Use SparkSession.builder.getOrCreate() instead.",
DeprecationWarning)
if (cls._instantiatedContext is None
or SQLContext._instantiatedContext._sc._jsc is None):
jsqlContext = sc._jvm.SparkSession.builder().sparkContext(
sc._jsc.sc()).getOrCreate().sqlContext()
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
.. versionadded:: 1.6.0
"""
return self.__class__(self._sc, self.sparkSession.newSession())
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
.. versionadded:: 1.3.0
"""
self.sparkSession.conf.set(key, value)
def getConf(self, key, defaultValue=_NoValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
.. versionadded:: 1.3.0
Examples
--------
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", "10")
'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", "50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", "10")
'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
.. versionadded:: 1.3.1
Returns
-------
:class:`UDFRegistration`
"""
return self.sparkSession.udf
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
.. versionadded:: 1.4.0
Parameters
----------
start : int
the start value
end : int, optional
the end value (exclusive)
step : int, optional
the incremental step (default: 1)
numPartitions : int, optional
the number of partitions of the DataFrame
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
def registerFunction(self, name, f, returnType=None):
"""An alias for :func:`spark.udf.register`.
See :meth:`pyspark.sql.UDFRegistration.register`.
.. versionadded:: 1.2.0
.. deprecated:: 2.3.0
Use :func:`spark.udf.register` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.register instead.",
DeprecationWarning)
return self.sparkSession.udf.register(name, f, returnType)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""An alias for :func:`spark.udf.registerJavaFunction`.
See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`.
.. versionadded:: 2.1.0
.. deprecated:: 2.3.0
Use :func:`spark.udf.registerJavaFunction` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.registerJavaFunction instead.",
DeprecationWarning)
return self.sparkSession.udf.registerJavaFunction(name, javaClassName, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
Parameters
----------
rdd : :class:`RDD`
an RDD of Row or tuple
samplingRatio : float, optional
sampling ratio, or no sampling (default)
Returns
-------
:class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
.. versionadded:: 1.3.0
.. versionchanged:: 2.0.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1.0
Added verifySchema.
Parameters
----------
data : :class:`RDD` or iterable
an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
schema : :class:`pyspark.sql.types.DataType`, str or list, optional
a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
samplingRatio : float, optional
the sample ratio of rows used for inferring
verifySchema : bool, optional
verify data types of every row against schema. Enabled by default.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1='Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name='Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name='Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1='Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name='Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name='Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name='Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name='Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a='Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
.. versionadded:: 1.3.0
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
def dropTempTable(self, tableName):
""" Remove the temporary table from catalog.
.. versionadded:: 1.6.0
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
.. versionadded:: 1.3.0
Returns
-------
:class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
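        # Hypothetical usage sketch (table name, path and format are illustrative):
        #     df = sqlContext.createExternalTable("ext_events", path="/data/events",
        #                                         source="parquet")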
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
.. versionadded:: 1.0.0
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2='row1'), Row(f1=2, f2='row2'), Row(f1=3, f2='row3')]
"""
return self.sparkSession.sql(sqlQuery)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
.. versionadded:: 1.0.0
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
.. versionadded:: 1.3.0
Parameters
----------
dbName: str, optional
name of the database to use.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database='', tableName='table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
.. versionadded:: 1.3.0
Parameters
----------
dbName: str
            name of the database to use. Defaults to the current database.
Returns
-------
list
list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
.. versionadded:: 1.4.0
Returns
-------
:class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
        :class:`StreamingQuery` instances active on `this` context.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
.. deprecated:: 2.0.0
Use SparkSession.builder.enableHiveSupport().getOrCreate().
Parameters
----------
sparkContext : :class:`SparkContext`
The SparkContext to wrap.
jhiveContext : optional
An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
This is only for internal use.
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkContext._conf.set("spark.sql.catalogImplementation", "hive")
sparkSession = SparkSession.builder._sparkContext(sparkContext).getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
        you may end up launching multiple derby instances and encountering incredibly
        confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
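# Added illustration (not part of the original module): the non-deprecated path named in
# the deprecation warning above, sketched as a tiny helper for comparison with HiveContext.
def _example_hive_session():
    """Return a Hive-enabled SparkSession (sketch of the recommended replacement)."""
    from pyspark.sql import SparkSession
    return SparkSession.builder.enableHiveSupport().getOrCreate()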
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", "field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
roxyboy/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
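# Added illustration (not part of the original test module): the fit-and-inspect pattern
# exercised by launch_mcd_on_dataset, in miniature; the data and offsets are made up.
def _demo_min_cov_det(seed=0):
    """Fit MinCovDet on a small contaminated sample and return its robust estimates."""
    rng = np.random.RandomState(seed)
    data = rng.randn(100, 3)
    data[:10] += 10.  # ten gross outliers
    mcd = MinCovDet(random_state=rng).fit(data)
    return mcd.location_, mcd.covariance_, mcd.support_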
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
Gezerj/Data-Analysis | Task-Problems/TASK 1.py | 1 | 1909 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 04 10:57:34 2017
@author: Gerwyn
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
"""
1) A new gun is expected to be able to fire its bullet to around 200m. However the manufacturers have identified 14 independent physical phenomena, each of which could affect the distance of the bullet by as much as +/- 8 meters.
Using a uniform random number to mimic each of the physical phenomena, show how one might expect the distribution of bullet distances to look after 1000 fires of the gun. (You need to make a plot!)
Calculate the mean and standard deviation of the distribution.
What happens if the number of random phenomena affecting the gun is actually only 4.
Discuss the shapes of the distributions with your classmates, and discuss why they look the way they do
Report what fraction of the distances are above 190m in each case.
"""
################################################
plt.close('all')
dist = 200
ierr = 8
ipp = 14
num = 1000
distribution = np.zeros(num)
for j in xrange(num):
    X = np.random.uniform(-ierr, ierr, ipp)
L = np.sum(X)
distribution[j] = dist + L
plt.figure()
plt.hist(distribution, bins = 20)
################################################
m = np.mean(distribution)
s = np.std(distribution)
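# Added sanity check (illustration): each distance is 200 m plus a sum of ipp independent
# Uniform(-ierr, ierr) terms, so the central limit theorem predicts a roughly normal
# spread with standard deviation ierr*sqrt(ipp/3), about 17.3 m for ipp = 14.
expected_s = ierr * np.sqrt(ipp / 3.)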
################################################
new_distribution = np.zeros(num)
new_ipp = 4
for j in xrange(num):
    X = np.random.uniform(-ierr, ierr, new_ipp)
L = np.sum(X)
new_distribution[j] = dist + L
plt.figure()
plt.hist(new_distribution, bins = 20)
above = distribution[distribution > 190]
new_above = new_distribution[new_distribution > 190]
# report the fraction (not the raw count) of distances above 190 m; true division is
# imported from __future__ at the top of the file
print len(above) / num
print len(new_above) / num
################################################# | gpl-3.0 |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/neighbors/approximate.py | 8 | 22053 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
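# Added illustration (not part of the original scikit-learn module): the fingerprint
# produced by GaussianRandomProjectionHash above boils down to "project onto random
# Gaussian directions, keep the signs, pack 32 signs into one unsigned integer".
# The helper below is a hypothetical stand-alone sketch for demonstration only.
def _demo_cosine_lsh_fingerprint(X, random_state=0):
    """Return one 32-bit hash per row of X (illustration only)."""
    X = np.asarray(X)
    rng = check_random_state(random_state)
    directions = rng.randn(X.shape[1], MAX_HASH_SIZE)       # 32 random projections
    signs = (np.dot(X, directions) > 0).astype(np.uint8)    # sign pattern per sample
    return np.packbits(signs, axis=1).view(HASH_DTYPE).ravel()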
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
distances = pairwise_distances(query, self._fit_X[candidates],
metric='cosine')[0]
distance_positions = np.argsort(distances)
return distance_positions, distances[distance_positions]
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # `n_components` = hash size and `n_features` = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
            Number of neighbors required. If not provided, this will
            return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
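# Added illustration (not part of the original module): partial_fit extends an
# already-fitted index, so additions are best batched. Shapes and sizes below are
# arbitrary demonstration values.
def _demo_partial_fit(seed=0):
    """Fit an LSHForest, insert a second batch, then query it (illustration only)."""
    rng = np.random.RandomState(seed)
    forest = LSHForest(random_state=seed).fit(rng.randn(50, 8))
    forest.partial_fit(rng.randn(20, 8))      # appended to the existing sorted trees
    return forest.kneighbors(rng.randn(3, 8), n_neighbors=2, return_distance=False)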
| mit |
mpharrigan/msmbuilder | MSMBuilder/plot_graph.py | 2 | 4369 | # This file is part of MSMBuilder.
#
# Copyright 2011 Stanford University
#
# MSMBuilder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Code for visualizing networks (e.g. transition, count, or flux matrices).
"""
from __future__ import print_function, division, absolute_import
import scipy.sparse
import numpy as np
import networkx
import sys
import re
import logging
logger = logging.getLogger(__name__)
def CreateNetwork(Matrix, EqPops, Directed=True, EdgeScale=2, PopCutoff=0.01, EdgeCutoff=0.1, ImageList=None, Labels=None):
"""Creates a NetworkX graph (or DiGraph) object.
Inputs:
Matrix -- a sparse matrix.
EqPops -- state equilibrium populations.
Keyword Arguments:
Directed -- Is the graph directed? Default: True
EdgeScale -- a scale factor for edges. Default: 2
PopCutoff -- hide states with populations lower than this. Default: 0.01
EdgeCutoff -- hide edges with weights lower than this. Default: 0.1
ImageList -- A list of filenames for visualization on each states. Default: None
Labels -- A List of labels for display on each state. Default: None
Notes:
The NetworkX graph can be used for simple visualization (e.g. matplotlib) or export to a GraphViz dotfile.
You can optionally add image paths to the graph, which will be recorded in the dotfile for eventual visualization.
"""
Matrix=Matrix.tocsr()
print("Loaded an MSM with %d states..." % Matrix.shape[0])
#These are the desired states.
Ind=np.where(EqPops>PopCutoff)[0]
if len(Ind) == 0:
raise ValueError("Error! No nodes will be rendered. Try lowering the population cutoff (epsilon).")
# if user specified labels use those, otherwise use Ind
    if Labels is None:
Labels = Ind
#Select the desired subset of Matrix
Matrix=Matrix[Ind][:,Ind]
EqEnergy=-np.log(EqPops[Ind])
    # invert things so less populated states are smaller than more populated ones
maxEqEnergy = EqEnergy.max()
EqEnergy = 1 + maxEqEnergy - EqEnergy # add 1 to ensure nothing gets 0 weight
# Renormalize stuff to make it more reasonable
frm, to, weight = scipy.sparse.find( Matrix )
weight /= weight.max()
weight *= EdgeScale
n=Matrix.shape[0]
if Directed:
G=networkx.from_scipy_sparse_matrix(Matrix,create_using=networkx.DiGraph())
else:
G=networkx.from_scipy_sparse_matrix(Matrix)
logger.info("Rendering %d nodes...", n)
# Write attributes to G
for i in range(n):
G.node[i]["width"]=EqEnergy[i]/3
G.node[i]["height"]=EqEnergy[i]/3
G.node[i]["fixedsize"] = True
G.node[i]["label"]=Labels[i]
for i in range(len(weight)):
G[frm[i]][to[i]]["penwidth"]=weight[i]*2
G[frm[i]][to[i]]["arrowsize"]=weight[i]*2
#Save image paths if desired.
    if ImageList is not None:
logger.info("Found %d images - attempting to include them in the .dot file", len(ImageList))
ImageList=np.array(ImageList)
for Image in ImageList:
match = re.findall( '(\d+)', Image )
if len(match) > 0:
state = int( match[0] )
if state in Ind:
Gind = int(np.where( Ind == state)[0])
G.node[Gind]["image"]=Image
logger.info("Found an image for state: %d", state)
return(G)
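# Added usage sketch (not part of the original module): wire CreateNetwork and
# PlotNetwork together. ``tprob`` and ``populations`` are assumed to come from an
# existing MSM build; the names here are placeholders for illustration.
def _example_render_msm(tprob, populations, dotfile="Graph.dot"):
    """Build the population-filtered graph for an MSM and write a GraphViz dotfile."""
    G = CreateNetwork(scipy.sparse.csr_matrix(tprob), np.asarray(populations),
                      Directed=True, PopCutoff=0.01, EdgeCutoff=0.1)
    PlotNetwork(G, OutputFile=dotfile)
    return G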
def PlotNetwork(G,OutputFile="Graph.dot"):
"""Plot a graph to screen and also save output."""
try:
networkx.draw_graphviz(G)
except:
logger.error("could not plot graph to screen. Check X / Matplotlib settings.")
networkx.write_dot(G, OutputFile)
logger.info("Wrote: %s", OutputFile)
| gpl-2.0 |
LangmuirSim/langmuir | LangmuirPython/langmuir/ivcurve.py | 2 | 18624 | # -*- coding: utf-8 -*-
"""
.. note::
Functions for analyzing current voltage data.
.. moduleauthor:: Adam Gagorik <[email protected]>
"""
from langmuir import units, Quantity
import langmuir as lm
import pandas as pd
import numpy as np
import collections
import StringIO
class IVCurve(object):
"""
A class to analyze IV curves.
======== ==========================
Attr Description
======== ==========================
**i** current
**v** voltage
**p** power
**a** area
**j** current density
**r** irradiance
======== ==========================
.. seealso::
:py:class:`IVCurveSolar`
"""
class units:
def __init__(self):
pass
i = units.nA
v = units.V
p = units.nW
a = units.nm**2
j = units.mA/units.cm**2
r = units.mW/units.cm**2
class latex:
def __init__(self):
pass
i = r'\ensuremath{nA}'
v = r'\ensuremath{V}'
p = r'\ensuremath{nW}'
a = r'\ensuremath{nm^{2}}'
j = r'\ensuremath{mA\,cm^{-2}}'
r = r'\ensuremath{mW\,cm^{-2}}'
class string:
def __init__(self):
pass
i = r'nA'
v = r'V'
p = r'nW'
a = r'nm**2'
j = r'mA/cm**2'
r = r'mW/cm**2'
class columns:
def __init__(self):
pass
i = r'drain:current'
v = r'voltage.right'
x = r'grid.x'
y = r'grid.y'
def __init__(self, i, v, a, ierr=None):
"""
Construct IV curve.
:param i: current
:param v: voltage
:param a: area
:param ierr: std of current
:type i: :py:class:`numpy.ndarray`
:type v: :py:class:`numpy.ndarray`
:type a: :py:class:`numpy.ndarray` or :py:class:`float`
>>> v = np.linspace(0, 100, 10)
>>> i = np.tanh(v)
>>> a = 1024 * 256
>>> ivcurve = lm.ivcurve.IVCurve(i, v, a)
.. seealso::
:py:meth:`IVCurve.from_dataframe`
:py:meth:`IVCurve.from_panel`
:py:meth:`IVCurve.load_pkl`
:py:meth:`IVCurve.load_pkls`
"""
i = np.asanyarray(i)
v = np.asanyarray(v)
if np.isscalar(a):
a = np.ones_like(i) * a
else:
a = np.asanyarray(a)
assert i.shape == v.shape == a.shape
        ### >>> units on
self.i = Quantity(i, self.units.i) # current
self.v = Quantity(v, self.units.v) # voltage
self.a = Quantity(a, self.units.a) # area
self.p = (self.i * self.v).to(self.units.p) # power
self.j = (self.i / self.a).to(self.units.j) # density
self.r = (self.p / self.a).to(self.units.r) # irradiance
if ierr is None:
ierr = np.zeros_like(i)
else:
ierr = np.asanyarray(ierr)
self.ierr = Quantity(ierr, self.units.i)
self.perr = (self.ierr * self.v).to(self.units.p) # power
self.jerr = (self.ierr / self.a).to(self.units.j) # density
self.rerr = (self.perr / self.a).to(self.units.r) # irradiance
### <<< units off
self.i = np.asanyarray(self.i.magnitude)
self.v = np.asanyarray(self.v.magnitude)
self.p = np.asanyarray(self.p.magnitude)
self.a = np.asanyarray(self.a.magnitude)
self.j = np.asanyarray(self.j.magnitude)
self.r = np.asanyarray(self.r.magnitude)
self.ierr = np.asanyarray(self.ierr.magnitude)
self.perr = np.asanyarray(self.perr.magnitude)
self.jerr = np.asanyarray(self.jerr.magnitude)
self.rerr = np.asanyarray(self.rerr.magnitude)
class stats:
def __init__(self):
pass
i = lm.analyze.Stats(self.i, 'i')
v = lm.analyze.Stats(self.v, 'v')
p = lm.analyze.Stats(self.p, 'p')
a = lm.analyze.Stats(self.a, 'a')
j = lm.analyze.Stats(self.j, 'j')
r = lm.analyze.Stats(self.r, 'r')
ierr = lm.analyze.Stats(self.ierr, 'ierr')
jerr = lm.analyze.Stats(self.jerr, 'jerr')
perr = lm.analyze.Stats(self.perr, 'perr')
rerr = lm.analyze.Stats(self.rerr, 'rerr')
self.stats = stats
@classmethod
def from_dataframe(cls, frame):
"""
Construct IV curve from pandas DataFrame.
:param frame: dataframe object
:type frame: :py:class:`pandas.DataFrame`
>>> frame = lm.common.load_pkl('calculated.pkl.gz')
        >>> ivcurve = lm.ivcurve.IVCurve.from_dataframe(frame)
.. seealso::
:py:meth:`IVCurve.load_pkl`
"""
i = np.asanyarray(frame[cls.columns.i])
v = np.asanyarray(frame[cls.columns.v])
x = np.asanyarray(frame[cls.columns.x])
y = np.asanyarray(frame[cls.columns.y])
a = x * y
IVCurve = cls(i, v, a)
return IVCurve
@classmethod
def from_panel(cls, panel):
"""
Construct IV curve from pandas Panel.
:param panel: panel object
:type panel: :py:class:`pandas.Panel`
>>> pkls = lm.find.pkls('.', stub='gathered*', r=True)
>>> panel = lm.analyze.create_panel(lm.common.load_pkls(pkls))
>>> ivcurve = lm.ivcurve.IVCurve.from_panel(panel)
.. seealso::
:py:meth:`IVCurve.load_pkls`
"""
i = np.asanyarray(
panel.minor_xs(cls.columns.i).mean(axis=1).fillna(0))
v = np.asanyarray(
panel.minor_xs(cls.columns.v).mean(axis=1).fillna(0))
grid_x = np.asanyarray(panel.minor_xs(cls.columns.x).mean(axis=1))
grid_y = np.asanyarray(panel.minor_xs(cls.columns.y).mean(axis=1))
a = grid_x * grid_y
ierr = np.asanyarray(
panel.minor_xs(cls.columns.i).std(axis=1).fillna(0))
IVCurve = cls(i, v, a, ierr)
return IVCurve
@classmethod
def load_pkl(cls, pkl):
"""
Construct IV curve from pkl file.
:param pkl: name of file
:type pkl: str
>>> ivcurve = lm.ivcurve.IVCurve.load_pkl('gathered.pkl.gz')
"""
pkl = lm.common.load_pkl(pkl)
return cls.from_dataframe(pkl)
@classmethod
def load_pkls(cls, pkls):
"""
Construct IV curve from a list of pkl files.
:param pkls: list of filenames.
:type pkls: list
>>> pkls = lm.find.pkls('.', stub='gathered*', r=True)
>>> ivcurve = lm.ivcurve.IVCurve.load_pkls(pkls)
"""
panel = lm.analyze.create_panel(lm.common.load_pkls(pkls))
return cls.from_panel(panel)
def to_dict(self):
"""
Get data as dict.
"""
d = collections.OrderedDict()
d['v' ] = self.v
d['i' ] = self.i
d['ierr'] = self.ierr
d['p' ] = self.p
d['perr'] = self.perr
d['a' ] = self.a
d['j' ] = self.j
d['jerr'] = self.jerr
d['r' ] = self.r
d['rerr'] = self.rerr
return d
def to_dataframe(self):
"""
Convert data into a :py:class:`pandas.DataFrame`
"""
d = self.to_dict()
return pd.DataFrame(dict(d), columns=d.keys())
def save_csv(self, handle, **kwargs):
"""
Save data to a CSV file.
:param handle: filename
:type handle: str
"""
handle = lm.common.zhandle(handle, 'wb')
frame = self.to_dataframe()
_kwargs = dict(index=True)
_kwargs.update(**kwargs)
frame.to_csv(handle, **_kwargs)
handle.write('#\n')
series = self.to_series()
series.to_csv(handle, **_kwargs)
def summary(self):
"""
Get summary of all calculated results as a dictionary.
"""
d = collections.OrderedDict()
d.update(self.stats.i.to_dict())
d.update(self.stats.v.to_dict())
d.update(self.stats.p.to_dict())
d.update(self.stats.a.to_dict())
d.update(self.stats.j.to_dict())
d.update(self.stats.r.to_dict())
d.update(self.stats.ierr.to_dict())
d.update(self.stats.jerr.to_dict())
d.update(self.stats.perr.to_dict())
d.update(self.stats.rerr.to_dict())
return d
def to_series(self):
"""
Convert data into a :py:class:`pandas.Series`
"""
d = self.summary()
return pd.Series(d, index=d.keys())
def save_pkl(self, handle):
"""
Save results and data to a PKL file.
:param handle: filename
:type handle: str
"""
results = self.to_dict()
results.update(self.summary())
lm.common.save_pkl(results, handle)
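# Added illustration (not part of the original module): the fill factor computed by
# IVCurveSolar below is simply the maximum power point relative to the product of the
# open-circuit voltage and short-circuit current, expressed as a percentage.
def _fill_factor_percent(v_oc, i_sc, p_mp):
    """Return FF = 100 * p_mp / (v_oc * i_sc); bare numbers, no unit handling."""
    return 100. * p_mp / (v_oc * i_sc)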
class IVCurveSolar(IVCurve):
"""
A class to analyze IV curves for solar cells. Calculates the fill factor.
======== ==========================
Attr Description
======== ==========================
**v_oc** open circuit voltage
**v_mp** voltage at max power
**i_sc** short circuit current
**i_mp** current at max power
**p_th** theoretical power
**p_mp** max power
**fill** fill factor
======== ==========================
"""
class latex(IVCurve.latex):
def __init__(self):
pass
f = r'\%'
v_oc = r'\ensuremath{v_{oc}}'
i_sc = r'\ensuremath{i_{sc}}'
p_th = r'\ensuremath{p_{th}}'
j_sc = r'\ensuremath{j_{sc}}'
r_th = r'\ensuremath{r_{th}}'
i_mp = r'\ensuremath{i_{mp}}'
v_mp = r'\ensuremath{v_{mp}}'
p_mp = r'\ensuremath{p_{mp}}'
j_mp = r'\ensuremath{j_{mp}}'
r_mp = r'\ensuremath{r_{mp}}'
fill = r'\ensuremath{FF}'
class string(IVCurve.string):
def __init__(self):
pass
f = r'%'
v_oc = r'v_oc'
i_sc = r'i_sc'
p_th = r'p_th'
j_sc = r'j_sc'
r_th = r'r_th'
i_mp = r'i_mp'
v_mp = r'v_mp'
p_mp = r'p_mp'
j_mp = r'j_mp'
r_mp = r'r_mp'
fill = r'fill'
def __init__(self, i, v, a, ierr=None, v_shift=1.5, **kwargs):
"""
Construct IV curve. See arguments for :py:class:`IVCurve`.
:param v_shift: amount to shift voltage axis by
:type v_shift: float
>>> v = np.linspace(0, 100, 10)
>>> i = np.tanh(v)
>>> a = 1024 * 256
>>> ivcurve = lm.ivcurve.IVCurveSolar(i, v, a)
"""
self.v_shift = v_shift
v = np.asanyarray(v) + v_shift
super(IVCurveSolar, self).__init__(i, v, a, ierr)
self.v_oc = 0.0
self.i_sc = 0.0
self.p_th = 0.0
self.v_mp = 0.0
self.i_mp = 0.0
self.p_mp = 0.0
self.j_sc = 0.0
self.r_th = 0.0
self.j_mp = 0.0
self.r_mp = 0.0
self.fill = 0.0
self.ifit = None
self.pfit = None
self.jfit = None
self.rfit = None
if kwargs:
self.calculate(**kwargs)
def summary(self):
"""
Get summary of all calculated results as a dictionary.
"""
results = IVCurve.summary(self)
results.update(v_oc=self.v_oc)
results.update(i_sc=self.i_sc)
results.update(p_th=self.p_th)
results.update(v_mp=self.v_mp)
results.update(i_mp=self.i_mp)
results.update(p_mp=self.p_mp)
results.update(j_sc=self.j_sc)
results.update(r_th=self.r_th)
results.update(j_mp=self.j_mp)
results.update(r_mp=self.r_mp)
results.update(fill=self.fill)
return results
def calculate(self, mode='interp1d', recycle=False, guess=None, **kwargs):
"""
Calculate fill factor. kwargs are passed to fitting functions.
:param mode: fitting mode (tanh, power, interp1d, spline, erf)
:param recycle: recycle fit parameters when guessing
:param guess: guess for v_oc (default=shift)
:type mode: str
:type recycle: bool
:type guess: float
============ ============
Mode Option
============ ============
**power** order=8
**tanh**
**erf**
**interp1d** kind='cubic'
**spline** k=5
============ ============
>>> ivcurve = lm.ivcurve.IVCurve.load_pkl('gathered.pkl.gz')
>>> ivcurve.calculate(mode='interp1d', kind='linear')
>>> print ivcurve.fill
"""
if mode == 'power':
self._fit_power(recycle=recycle, **kwargs)
elif mode == 'tanh':
self._fit_tanh(recycle=recycle, **kwargs)
elif mode == 'erf':
self._fit_erf(recycle=recycle, **kwargs)
elif mode == 'interp1d':
self._fit_interp1d(recycle=recycle, **kwargs)
elif mode == 'spline':
self._fit_spline(recycle=recycle, **kwargs)
else:
raise NotImplementedError('unknown fit mode: %s' % mode)
if guess is None:
guess = self.v_shift
### >>> units on
self.v_oc = self.ifit.solve(x0=guess) * self.units.v
self.i_sc = self.ifit(0) * self.units.i
self.p_th =(self.v_oc * self.i_sc).to(self.units.p)
self.v_mp = self.pfit.minimize(0.66 * self.v_oc.magnitude,
return_y=False) * self.units.v
self.i_mp = self.ifit(self.v_mp.magnitude) * self.units.i
self.p_mp =(self.i_mp * self.v_mp).to(self.units.p)
area = np.mean(self.a) * self.units.a
self.j_sc = (self.i_sc / area).to(self.units.j)
self.j_mp = (self.i_mp / area).to(self.units.j)
self.r_th = (self.p_th / area).to(self.units.r)
self.r_mp = (self.p_mp / area).to(self.units.r)
self.fill = (self.p_mp / self.p_th * 100)
self.fill = float(self.fill.magnitude)
self.v_oc = float(self.v_oc.magnitude)
self.i_sc = float(self.i_sc.magnitude)
self.p_th = float(self.p_th.magnitude)
self.v_mp = float(self.v_mp.magnitude)
self.i_mp = float(self.i_mp.magnitude)
self.p_mp = float(self.p_mp.magnitude)
self.j_sc = float(self.j_sc.magnitude)
self.r_th = float(self.r_th.magnitude)
self.j_mp = float(self.j_mp.magnitude)
self.r_mp = float(self.r_mp.magnitude)
### <<< units off
def _fit_power(self, recycle=False, order=8, **kwargs):
"""
Fit using power series.
"""
self.ifit = lm.fit.FitPower(self.v, self.i,
popt=None, yerr=self.ierr, order=order)
self.jfit = lm.fit.FitPower(self.v, self.j,
popt=None, yerr=self.jerr, order=order)
if recycle:
p_popt = self.ifit.popt
r_popt = self.jfit.popt
else:
p_popt = None
r_popt = None
order += 1
self.pfit = lm.fit.FitPower(self.v, self.p,
popt=p_popt, yerr=self.perr, order=order)
self.rfit = lm.fit.FitPower(self.v, self.r,
popt=r_popt, yerr=self.rerr, order=order)
def _fit_tanh(self, recycle=False, **kwargs):
"""
Fit using hyperbolic tangent.
"""
i_popt = [self.stats.i.min, 1, -self.v_shift, 0]
j_popt = [self.stats.j.min, 1, -self.v_shift, 0]
self.ifit = lm.fit.FitTanh(self.v, self.i, popt=i_popt,
yerr=self.ierr)
self.jfit = lm.fit.FitTanh(self.v, self.j, popt=j_popt,
yerr=self.jerr)
if recycle:
p_popt = self.ifit.popt
r_popt = self.jfit.popt
else:
p_popt = i_popt
r_popt = j_popt
self.pfit = lm.fit.FitXTanh(self.v, self.p, popt=p_popt,
yerr=self.perr)
self.rfit = lm.fit.FitXTanh(self.v, self.r, popt=r_popt,
yerr=self.rerr)
def _fit_erf(self, recycle=False, **kwargs):
"""
Fit using error function.
"""
i_popt = [self.stats.i.min, 1, -self.v_shift, 0]
j_popt = [self.stats.j.min, 1, -self.v_shift, 0]
self.ifit = lm.fit.FitErf(self.v, self.i, popt=i_popt,
yerr=self.ierr)
self.jfit = lm.fit.FitErf(self.v, self.j, popt=j_popt,
yerr=self.jerr)
if recycle:
p_popt = self.ifit.popt
r_popt = self.jfit.popt
else:
p_popt = i_popt
r_popt = j_popt
self.pfit = lm.fit.FitXErf(self.v, self.p, popt=p_popt,
yerr=self.perr)
self.rfit = lm.fit.FitXErf(self.v, self.r, popt=r_popt,
yerr=self.rerr)
def _fit_interp1d(self, recycle=False, kind='cubic', **kwargs):
"""
Fit using an interpolating polynomial.
"""
self.ifit = lm.fit.FitInterp1D(self.v, self.i, popt=None,
yerr=self.ierr, kind=kind)
self.jfit = lm.fit.FitInterp1D(self.v, self.j, popt=None,
yerr=self.jerr, kind=kind)
self.pfit = lm.fit.FitInterp1D(self.v, self.p, popt=None,
yerr=self.perr, kind=kind)
self.rfit = lm.fit.FitInterp1D(self.v, self.r, popt=None,
yerr=self.rerr, kind=kind)
def _fit_spline(self, recycle=False, k=3, **kwargs):
"""
Fit using spline.
"""
self.ifit = lm.fit.FitUnivariateSpline(self.v, self.i,
popt=None, yerr=self.ierr, k=k)
self.jfit = lm.fit.FitUnivariateSpline(self.v, self.j,
popt=None, yerr=self.jerr, k=k)
self.pfit = lm.fit.FitUnivariateSpline(self.v, self.p,
popt=None, yerr=self.perr, k=k)
self.rfit = lm.fit.FitUnivariateSpline(self.v, self.r,
popt=None, yerr=self.rerr, k=k)
def __str__(self):
s = StringIO.StringIO()
print >> s, '[IVCurveSolar]'
print >> s, ' {self.string.v_oc} = {self.v_oc:{fmt}} {self.string.v}'
print >> s, ' {self.string.v_mp} = {self.v_mp:{fmt}} {self.string.v}'
print >> s, ' {self.string.i_sc} = {self.i_sc:{fmt}} {self.string.i} {self.j_sc:{fmt}} {self.string.j}'
print >> s, ' {self.string.i_mp} = {self.i_mp:{fmt}} {self.string.i} {self.j_mp:{fmt}} {self.string.j}'
print >> s, ' {self.string.p_th} = {self.p_th:{fmt}} {self.string.p} {self.r_th:{fmt}} {self.string.r}'
print >> s, ' {self.string.p_mp} = {self.p_mp:{fmt}} {self.string.p} {self.r_mp:{fmt}} {self.string.r}'
print >> s, ' {self.string.fill} = {self.fill:{fmt}} {self.string.f}'
return s.getvalue().format(self=self, fmt='12.8f') | gpl-2.0 |
Chiroptera/QCThesis | MyML/cluster/K_Means_wrapper.py | 3 | 1057 | import numpy as np
from datetime import datetime
from sklearn.cluster import KMeans
# Receives:
# - mixture : n x d array with n points and d dimensions
# - numClusters : number of clusters to use
# - numInits : number of k-means runs
# Returns:
# - k_centroids : list of final centroids of each iteration
# - qk_assignment : list of assignments of each point to one of the
# centroids on each iteration
# - k_timings_cg : list of timing for each iteration
def k_means(mixture,numClusters,numInits):
k_timings_cg=list()
start=datetime.now()
k_assignment=list()
k_centroids=list()
k_inertia=list()
for i in range(numInits):
estimator = KMeans(n_clusters=numClusters,init='k-means++',n_init=1)
assignment = estimator.fit_predict(mixture)
centroids = estimator.cluster_centers_
k_centroids.append(centroids)
k_assignment.append(assignment)
k_inertia.append(estimator.inertia_)
k_timings_cg.append((datetime.now() - start).total_seconds())
start=datetime.now()
return k_centroids,k_assignment,k_timings_cg,k_inertia
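# Added usage sketch (illustration only): cluster a small synthetic two-blob mixture
# and keep the run with the lowest inertia. The data below is made up.
if __name__ == "__main__":
    mixture = np.vstack([np.random.randn(50, 2), np.random.randn(50, 2) + 5.])
    centroids, assignments, timings, inertias = k_means(mixture, 2, 3)
    best = int(np.argmin(inertias))
    print(centroids[best])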
| mit |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/svm/classes.py | 4 | 28639 | from .base import BaseLibLinear, BaseSVC, BaseLibSVM
from ..base import RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin
from ..feature_selection.from_model import _LearntSelectorMixin
class LinearSVC(BaseLibLinear, LinearClassifierMixin, _LearntSelectorMixin,
SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'l1' or 'l2' (default='l2')
Specifies the loss function. 'l1' is the hinge loss (standard SVM)
while 'l2' is the squared hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from an theoretical perspective
as it is consistent it is seldom used in practice and rarely leads to
better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : int, default: 0
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
`coef_` : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
        Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
`intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. Furthermore
SGDClassifier is scalable to large number of samples as it uses
a Stochastic Gradient Descent optimizer.
Finally SGDClassifier can fit both dense and sparse data without
memory copy if the input is C-contiguous or CSR.
"""
def __init__(self, penalty='l2', loss='l2', dual=True, tol=1e-4, C=1.0,
multi_class='ovr', fit_intercept=True, intercept_scaling=1,
class_weight=None, verbose=0, random_state=None):
super(LinearSVC, self).__init__(
penalty=penalty, loss=loss, dual=dual, tol=tol, C=C,
multi_class=multi_class, fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling,
class_weight=class_weight, verbose=verbose,
random_state=random_state)
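# Added illustration (not part of the original scikit-learn module): LinearSVC follows
# the usual fit/predict pattern; a minimal round trip on four toy points.
def _demo_linear_svc():
    """Fit LinearSVC on a toy problem and predict one point (illustration only)."""
    import numpy as np
    X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    y = np.array([1, 1, 2, 2])
    clf = LinearSVC(C=1.0).fit(X, y)
    return clf.predict([[-0.8, -1]])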
class SVC(BaseSVC):
"""C-Support Vector Classification.
    The implementation is based on libsvm. The fit time complexity
    is more than quadratic with the number of samples, which makes it hard
    to scale to datasets with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each,
see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
.. The narrative documentation is available at http://scikit-learn.org/
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [n_SV, n_features]
Support vectors.
`n_support_` : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
`dual_coef_` : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in the \
SVM section of the User Guide for details.
`coef_` : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, random_state=None):
super(SVC, self).__init__(
'c_svc', kernel, degree, gamma, coef0, tol, C, 0., 0., shrinking,
probability, cache_size, class_weight, verbose, max_iter,
random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
        Degree of the kernel function.
        It is significant only for the 'poly' kernel.
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [n_SV, n_features]
Support vectors.
`n_support_` : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
`dual_coef_` : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function.
        For multiclass, coefficient for all 1-vs-1 classifiers.
        The layout of the coefficients in the multiclass case is somewhat
        non-trivial. See the section about multi-class classification in
        the SVM section of the User Guide for details.
`coef_` : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, verbose=False, max_iter=-1,
random_state=None):
super(NuSVC, self).__init__(
'nu_svc', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter, random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
    The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.
    gamma : float, optional (default=0.0)
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficients of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma=0.0,
kernel='rbf', max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
C=1.0, epsilon=0.1, shrinking=True, probability=False,
cache_size=200, verbose=False, max_iter=-1,
random_state=None):
super(SVR, self).__init__(
'epsilon_svr', kernel, degree, gamma, coef0, tol, C, 0., epsilon,
shrinking, probability, cache_size, None, verbose,
max_iter, random_state)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
    Similar to NuSVC, for regression, uses a parameter nu to control
    the number of support vectors. However, unlike NuSVC, where nu
    replaces C, here nu replaces the parameter epsilon of SVR.
    The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
    nu : float, optional
        An upper bound on the fraction of training errors and a lower bound of
        the fraction of support vectors. Should be in the interval (0, 1]. By
        default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.
    gamma : float, optional (default=0.0)
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficients of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma=0.0, coef0=0.0, shrinking=True,
probability=False, tol=1e-3, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(NuSVR, self).__init__(
'nu_svr', kernel, degree, gamma, coef0, tol, C, nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter, random_state)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outliers Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking: boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficient of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
nu=0.5, shrinking=True, cache_size=200, verbose=False,
max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
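    # Editor's note: hedged usage sketch, since the OneClassSVM docstring above
    # has no Examples section (this block is not part of the upstream
    # scikit-learn source; the parameter values are illustrative only):
    #
    #     >>> import numpy as np
    #     >>> from sklearn.svm import OneClassSVM
    #     >>> X = np.array([[0., 0.], [0.1, -0.1], [-0.2, 0.1], [0.1, 0.2]])
    #     >>> clf = OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1).fit(X)
    #     >>> clf.predict(X).shape   # labels are +1 (inlier) / -1 (outlier)
    #     (4,)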
| bsd-3-clause |
CA-Lab/moral-exchange | simulations/hebbian.py | 1 | 3131 | import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pylab as pl
import random as rd
import scipy as sp
import networkx as nx
import numpy as np
import math as mt
import pprint as ppt
time_list = []
energy_state = []
perturbation_period = 460
pert_accu = 0
def init_full():
global time, g, positions, U
E = 0
time = 0
g = nx.complete_graph(50)
for i in g.nodes():
g.node[i]['s'] = rd.choice([1,-1])
for i,j in g.edges():
g.edge[i][j]['w'] = rd.choice([-1,1])
def init_watts():
global time, g, positions, U
E = 0
time = 0
g = nx.watts_strogatz_graph(25, 2, 0.3)
for i in g.nodes():
g.node[i]['s'] = rd.choice([1,-1])
for i,j in g.edges():
g.edge[i][j]['w'] = rd.choice([-1,1])
def init_erdos():
global time, g, positions, U
E = 0
time = 0
g = nx.erdos_renyi_graph(56, .08)
for i in g.nodes():
g.node[i]['s'] = rd.choice([1,-1])
for i,j in g.edges():
g.edge[i][j]['w'] = rd.choice([-1,1])
def draw():
pl.cla()
nx.draw(g, pos = positions,
node_color = [g.node[i]['s'] for i in g.nodes_iter()],
with_labels = True, edge_color = 'c',
width = [g.edge[i][j]['w'] for (i,j) in g.edges_iter()],
cmap = pl.cm.autumn, vmin = 0, vmax = 1)
pl.axis('image')
pl.title('t = ' + str(time))
plt.show()
def local_u(i):
    # local utility of node i: sum of edge weights times neighbour states
    # (the original re-drew i at random here, which made the argument unused;
    # that line has been removed so callers such as global_u(i) behave as intended)
    m = []
    for j in g.neighbors(i):
        m.append( g.edge[i][j]['w'] * g.node[j]['s'] )
    return sum(m)
def global_u():
U = []
for i in g.nodes():
U.append( local_u( i ) )
#print sum( U )
return sum(U)
def randomize_states():
for i in g.nodes():
g.node[i]['s'] = rd.choice([1,-1])
def hebbian():
global time, g, positions, U
i = rd.choice(g.nodes())
if local_u( i ) >= 0:
g.node[i]['s'] = 1
else:
g.node[i]['s'] = -1
for j in g.neighbors( i ):
if g.node[i]['s'] == g.node[j]['s']:
g.edge[i][j]['w'] += 1
else:
g.edge[i][j]['w'] -= 1
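# Editor's note: hedged helper (not part of the original script) that spells
# out the update rule used in hebbian() above: an edge is strengthened by 1
# when the endpoint states agree and weakened by 1 when they disagree.
def hebbian_weight_update(w, s_i, s_j):
    # s_i, s_j are node states in {+1, -1}; w is the current edge weight
    if s_i == s_j:
        return w + 1
    else:
        return w - 1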
def hopfield():
i = rd.choice(g.nodes())
if local_u(i) >= 0:
g.node[i]['s'] = 1
else:
g.node[i]['s'] = -1
def step():
global time, g, positions, U, pert_accu, perturbation_period
time += 1
if pert_accu == perturbation_period:
pert_accu = 0
randomize_states()
else:
pert_accu += 1
# if time <= 25:
# hopfield()
# else:
hebbian()
time_list.append(time)
energy_state.append( global_u() )
def no_draw():
global time
print time
import pycxsimulator
#init()
#init_full()
#init_watts()
init_erdos()
#init_barabasi()
positions = nx.spring_layout(g)
pycxsimulator.GUI().start(func = [init_erdos, no_draw, step])
#pycxsimulator.GUI().start(func = [init_erdos, draw, step])
plt.cla()
plt.plot(time_list, energy_state, 'b-')
plt.xlabel('Time')
plt.ylabel('Global Utility')
plt.savefig('hebb_plot.png')
#plt.show()
| gpl-3.0 |
doanduyhai/incubator-zeppelin | interpreter/lib/python/mpl_config.py | 41 | 3653 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module provides utilities for users to configure the inline plotting
# backend through a PyZeppelinContext instance (e.g., through z.configure_mpl())
import matplotlib
def configure(**kwargs):
"""
Generic configure function.
Usage: configure(prop1='foo', prop2='bar', ...)
Currently supported zeppelin-specific properties are:
interactive - If true show all figures without explicit call to show()
via a post-execute hook.
angular - If true, bind figures to angular display system.
close - If true, close all figures once shown.
width, height - Default width / height of the figure in pixels.
fontsize - Font size.
dpi - dpi of the figure.
    format - Figure format (e.g. 'png', 'jpg', 'svg').
    supported_formats - Supported figure formats (list of strings).
context - ZeppelinContext instance (requires PY4J)
"""
_config.update(**kwargs)
# Broadcast relevant changes to matplotlib RC
_on_config_change()
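# Editor's note: hedged usage sketch (not part of the original module). The
# keyword names mirror the keys consumed by _on_config_change/_init_config
# below; in particular the figure format is stored under the key 'format':
#
#     import mpl_config
#     mpl_config.configure(width=800, height=400, dpi=96, fontsize=12,
#                          interactive=False, format='svg')
#     mpl_config.get('width')    # -> 800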
def get(key):
"""
Get the configuration info given a key
"""
return _config[key]
def _on_config_change():
# dpi
dpi = _config['dpi']
# For older versions of matplotlib, savefig.dpi is not synced with
# figure.dpi by default
matplotlib.rcParams['figure.dpi'] = dpi
if matplotlib.__version__ < '2.0.0':
matplotlib.rcParams['savefig.dpi'] = dpi
# Width and height
width = float(_config['width']) / dpi
height = float(_config['height']) / dpi
matplotlib.rcParams['figure.figsize'] = (width, height)
# Font size
fontsize = _config['fontsize']
matplotlib.rcParams['font.size'] = fontsize
# Default Figure Format
fmt = _config['format']
supported_formats = _config['supported_formats']
if fmt not in supported_formats:
raise ValueError("Unsupported format %s" %fmt)
if matplotlib.__version__ < '1.2.0':
matplotlib.rcParams.update({'savefig.format': fmt})
else:
matplotlib.rcParams['savefig.format'] = fmt
# Interactive mode
interactive = _config['interactive']
matplotlib.interactive(interactive)
def _init_config():
dpi = matplotlib.rcParams['figure.dpi']
if matplotlib.__version__ < '1.2.0':
matplotlib.rcParams.update({'savefig.format': 'png'})
fmt = matplotlib.rcParams['savefig.format']
width, height = matplotlib.rcParams['figure.figsize']
fontsize = matplotlib.rcParams['font.size']
_config['dpi'] = dpi
_config['format'] = fmt
_config['width'] = width*dpi
_config['height'] = height*dpi
_config['fontsize'] = fontsize
_config['close'] = True
_config['interactive'] = matplotlib.is_interactive()
_config['angular'] = False
_config['supported_formats'] = ['png', 'jpg', 'svg']
_config['context'] = None
_config = {}
_init_config()
| apache-2.0 |
mayblue9/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of a X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
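# Editor's note: hedged sketch (not part of the upstream source). On centered
# data the NIPALS inner loop above and _svd_cross_product should return
# (nearly) collinear first x-weight vectors, so the absolute dot product of
# the two unit-norm vectors is expected to be close to 1:
#
#     >>> rng = np.random.RandomState(0)
#     >>> X = rng.randn(30, 5); X = X - X.mean(axis=0)
#     >>> Y = rng.randn(30, 3); Y = Y - Y.mean(axis=0)
#     >>> u_nip, v_nip, n_iter = _nipals_twoblocks_inner_loop(X, Y)
#     >>> u_svd, v_svd = _svd_cross_product(X, Y)
#     >>> abs(np.dot(u_nip.ravel(), u_svd.ravel()))  # expected close to 1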
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
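# Editor's note: hedged illustration (not part of the upstream source) of the
# centering/scaling contract above, with scale=True (the default):
#
#     >>> rng = np.random.RandomState(0)
#     >>> X, Y = rng.randn(10, 3) + 5.0, rng.randn(10, 2)
#     >>> Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(X.copy(), Y.copy())
#     >>> bool(np.allclose(Xc.mean(axis=0), 0.0))
#     True
#     >>> bool(np.allclose(Xc.std(axis=0, ddof=1), 1.0))
#     True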
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
allow to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
(ii) The inner loop estimates the weights vectors. This can be done
with two algo. (a) the inner loop of the original NIPALS algo. or (b) a
SVD on residuals cross-covariance matrices.
    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples in the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples in the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the residual (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted to Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
        return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimizes:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
    available in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
    max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
    regression, but slightly different from CCA. This is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
    available in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
    the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply perform an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
        # copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
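# Editor's note: hedged usage sketch for PLSSVD, whose docstring above has no
# Examples section (this block is not part of the upstream source; the toy
# data mirrors the PLSRegression/PLSCanonical examples earlier in this file):
#
#     >>> import numpy as np
#     >>> from sklearn.cross_decomposition import PLSSVD
#     >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
#     >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
#     >>> plssvd = PLSSVD(n_components=2).fit(X, Y)
#     >>> X_scores, Y_scores = plssvd.transform(X, Y)
#     >>> X_scores.shape, Y_scores.shape
#     ((4, 2), (4, 2))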
| bsd-3-clause |
SonneSun/MYSELF | facebook-kaggle/06_05.py | 1 | 6299 | #Facebook Kaggle Competition
import time
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
XLIMIT = 10
YLIMIT = 10
#FEATURE_LIST = ['x','y','accuracy','day','dow','hour']
FEATURE_LIST = ['x','y','accuracy','dow','month','hour']
def read_files(train_name, test_name):
types = {'row_id': np.dtype(int), \
'x': np.dtype(float),\
'y' : np.dtype(float), \
'accuracy': np.dtype(int), \
'time': np.dtype(int), \
'place_id': np.dtype(int)}
train = pd.read_csv(train_name, dtype=types)
test = pd.read_csv(test_name, dtype=types)
return train, test
def accuracy_features(train):
print " accuracy features ..."
place_id_mode_loc = train.groupby(['place_id'])['x','y'].agg(lambda x:x.value_counts().index[0]).reset_index()
place_id_mode_loc.columns = ['place_id','x_mode','y_mode']
new_train = pd.merge(train, place_id_mode_loc, on = ['place_id'], how = 'inner')
new_train['x_diff'] = abs(new_train['x_mode'] - new_train['x'])
new_train['y_diff'] = abs(new_train['y_mode'] - new_train['y'])
new_train['acc'] = new_train['accuracy']
new_train.loc[(new_train['accuracy'] > 200), 'acc'] = 200
new_train['acc'] = (new_train['acc']/10).astype(int)
acc_df = new_train.groupby(['acc'])['x_diff','y_diff'].mean().reset_index()
print " finished."
return acc_df
def generate_features(train, test):
#acc = accuracy_features(train)
#as verified, time is in minutes
train['day'] = (train['time'] / (60.0 * 24.0)).astype(int)
train['dow'] = train['day'] % 7
train['month'] = train['day'] % 30
train['hour'] = (train['time'] / 60.0).astype(int) % 24
# train['acc'] = train['accuracy']
# train.loc[(train['accuracy'] > 200), 'acc'] = 200
# train['acc'] = (train['acc']/10).astype(int)
# train = pd.merge(train, acc, on = ['acc'], how = 'inner')
# train.loc[((train['hour'] >= 0) & (train['hour'] < 8) ), 'tod'] = 1
# train.loc[((train['hour'] >= 8) & (train['hour'] < 16) ), 'tod'] = 2
# train.loc[((train['hour'] >= 16) & (train['hour'] < 24) ), 'tod'] = 3
test['day'] = (test['time'] / (60.0 * 24.0)).astype(int)
test['dow'] = test['day'] % 7
test['month'] = test['day'] % 30
test['hour'] = (test['time'] / 60.0).astype(int) % 24
# test['acc'] = test['accuracy']
# test.loc[(test['accuracy'] > 200), 'acc'] = 200
# test['acc'] = (test['acc']/10).astype(int)
# test = pd.merge(test, acc, on = ['acc'], how = 'inner')
# test.loc[((test['hour'] >= 0) & (test['hour'] < 8) ), 'tod'] = 1
# test.loc[((test['hour'] >= 8) & (test['hour'] < 16) ), 'tod'] = 2
# test.loc[((test['hour'] >= 16) & (test['hour'] < 24) ), 'tod'] = 3
#split the map into several buckets
xcell_size = 0.2
ycell_size = 0.5
cell_total = int((XLIMIT/xcell_size) * (YLIMIT/ycell_size))
train['x_adj'] = train['x']
train.loc[(train['x_adj'] == XLIMIT),'x_adj'] = train['x_adj'] - 0.0001
train['y_adj'] = train['y']
train.loc[(train['y_adj'] == YLIMIT),'y_adj'] = train['y_adj'] - 0.0001
train['xbucket'] = (train['x_adj']/ xcell_size).astype(int)
train['ybucket'] = (train['y_adj'] / ycell_size).astype(int)
train['cellID'] = train['ybucket'] * (XLIMIT/ xcell_size) + train['xbucket']
test['x_adj'] = test['x']
test.loc[(test['x_adj'] == XLIMIT),'x_adj'] = test['x_adj'] - 0.0001
test['y_adj'] = test['y']
test.loc[(test['y_adj'] == YLIMIT),'y_adj'] = test['y_adj'] - 0.0001
test['xbucket'] = (test['x_adj'] / xcell_size).astype(int)
test['ybucket'] = (test['y_adj'] / ycell_size).astype(int)
test['cellID'] = test['ybucket'] * (XLIMIT/ xcell_size) + test['xbucket']
#normalize features
# for f in FEATURE_LIST:
# f_mean = train[f].mean()
# f_std = train[f].std()
# train[f] = (train[f] - f_mean) / f_std
# test[f] = (test[f] - f_mean) / f_std
return train, test, cell_total
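# Editor's note: hedged worked example of the cell indexing above (not part of
# the original script). With xcell_size = 0.2 and ycell_size = 0.5 the 10x10
# map is split into 50 x-buckets and 20 y-buckets, i.e. 1000 cells in total.
# For a point at x = 3.7, y = 8.2:
#     xbucket = int(3.7 / 0.2) = 18
#     ybucket = int(8.2 / 0.5) = 16
#     cellID  = ybucket * (10 / 0.2) + xbucket = 16 * 50 + 18 = 818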
def train_model_on_cell(train, test, numCells, th):
for i in range(numCells):
print " cell %d ..." % i
start = time.time()
train_curr = train.loc[train.cellID == i].reset_index(drop = True)
if len(train_curr) == 0:
continue
place_counts = train_curr.place_id.value_counts()
mask = place_counts[train_curr.place_id.values] >= th
train_curr = train_curr.loc[mask.values].reset_index(drop = True)
train_X = train_curr[FEATURE_LIST].as_matrix()
#weight = train_curr['accuracy'].values
train_Y = train_curr['place_id'].values
# le = LabelEncoder()
# train_Y = le.fit_transform(train_Y)
test_index = test.loc[test.cellID == i].index
test_label = test.loc[(test.cellID == i),'place_id'].values
test_curr = test.loc[test.cellID == i].reset_index(drop = True)
test_X = test_curr[FEATURE_LIST].as_matrix()
# print len(train_curr)
# print len(test_curr)
#increase estimators won't influence too much
rf = RandomForestClassifier(max_depth = 20, n_estimators = 30)
#add weight won't influence too much
#rf.fit(train_X, train_Y, sample_weight = weight)
rf.fit(train_X, train_Y)
#test_predict = rf.predict_proba(test_X)
test_predict = rf.predict(test_X)
# test_predict = np.argsort(test_predict, axis=1)[:,::-1][:,:3]
# test_predict = le.inverse_transform(test_predict)
result_df = pd.DataFrame({'index': test_index, 'place_id': test_label, 'predict': test_predict})
print len(result_df.loc[result_df.place_id == result_df.predict]) / float(len(result_df))
# test.loc[test_index,'place_id_1'] = test_predict[:,0]
# test.loc[test_index,'place_id_2'] = test_predict[:,1]
# test.loc[test_index,'place_id_3'] = test_predict[:,2]
end = time.time()
print " finished. Uses %f s in time. " % (end - start)
break
return test
if __name__ == '__main__':
print "Reading files ..."
train_df, test_df = read_files('small_train.csv', 'small_test.csv')
print " Finished."
print "Generating features ..."
train_df, test_df, cell_total = generate_features(train_df, test_df)
print " Finished."
print "Training cell by cell. Total number is %d ..." % cell_total
test_df = train_model_on_cell(train_df, test_df, cell_total, 10)
#print len(test_df.loc[test_df.place_id == test_df.predict]) / float(len(test_df))
# result_df = test_df[['row_id','place_id_1','place_id_2','place_id_3']]
# result_df.to_csv('result.csv', index = False)
| apache-2.0 |
dkoslicki/CMash | ideas/StreamingQueryDNADatabase_save_results.py | 1 | 20654 | #! /usr/bin/env python
import khmer
import marisa_trie as mt
import numpy as np
import os
import sys
# The following is for ease of development (so I don't need to keep re-installing the tool)
try:
from CMash import MinHash as MH
except ImportError:
try:
import MinHash as MH
except ImportError:
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from CMash import MinHash as MH
import multiprocessing
import pandas as pd
import argparse
from argparse import ArgumentTypeError
import re
import matplotlib.pyplot as plt
from hydra import WritingBloomFilter, ReadingBloomFilter
from scipy.sparse import csr_matrix, csc_matrix
from scipy.sparse import save_npz
from scipy.io import savemat
import timeit
from itertools import islice
# TODO: export hit matrices
def parseNumList(input):
"""Thank you stack overflow"""
m = re.match(r'(\d+)(?:-(\d+))?(?:-(\d+))?$', input)
# ^ (or use .split('-'). anyway you like.)
if not m:
raise ArgumentTypeError("'" + input + "' is not a range of number. Expected forms like '1-5' or '2' or '10-15-2'.")
start = int(m.group(1))
end = int(m.group(2))
if m.group(3):
increment = int(m.group(3))
else:
increment = 1
return list(range(start, end+1, increment))
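# Editor's note: hedged doctest-style examples of the accepted range syntax
# (not part of the original script; the '2' case relies on the single-value
# fallback added above):
#
#     >>> parseNumList("1-5")
#     [1, 2, 3, 4, 5]
#     >>> parseNumList("10-15-2")
#     [10, 12, 14]
#     >>> parseNumList("2")
#     [2]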
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This script calculates containment indicies for each of the training/reference sketches"
" by streaming through the query file.", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-t', '--threads', type=int, help="Number of threads to use", default=multiprocessing.cpu_count())
parser.add_argument('-c', '--containment_threshold', type=float, help="Only return results with containment index above this "
"threshold at the maximum k-mer size.", default=0.1)
parser.add_argument('-p', '--plot_file', action="store_true", help="Optional flag to specify that a plot of the "
"k-mer curves should also be saved (same basename"
"as the out_file).")
parser.add_argument('-r', '--reads_per_core', type=int, help="Number of reads per core in each chunk of parallelization."
" Set as high as memory will allow (eg. 1M on 256GB, 48 core machine)", default=100000)
parser.add_argument('-f', '--filter_file',
help="Location of pre-filter bloom filter. Use only if you absolutely know what you're doing "
"(hard to error check bloom filters).")
parser.add_argument('-l', '--location_of_thresh', type=int,
help="Location in range to apply the threshold passed by the -c flag. -l 2 -c 5-50-10 means the"
" threshold will be applied at k-size 25. Default is largest size.", default=-1)
parser.add_argument('--sensitive', action="store_true", help="Operate in sensitive mode. Marginally more true positives with significantly more false positives. Use with caution.", default=False)
parser.add_argument('-v', '--verbose', action="store_true", help="Print out progress report/timing information")
	parser.add_argument('in_file', help="Input file: FASTA/Q file to be processed")
parser.add_argument('reference_file', help='Training database/reference file (in HDF5 format). Created with MakeStreamingDNADatabase.py')
parser.add_argument('out_file', help='Output csv file with the containment indices.')
	parser.add_argument('range', type=parseNumList, help="Range of k-mer sizes in the format <start>-<end>-<increment>."
" So 5-10-2 means [5, 7, 9]. If <end> is larger than the k-mer size"
"of the training data, this will automatically be reduced.")
# read in the arguments
args = parser.parse_args()
k_range = args.range
if k_range is None:
raise Exception("The --range argument is required, no matter what the help menu says.")
training_data = args.reference_file
query_file = args.in_file
results_file = args.out_file
npz_file = os.path.splitext(results_file)[0] + "_hit_matrix.npz"
num_threads = args.threads
location_of_thresh = args.location_of_thresh
coverage_threshold = args.containment_threshold
streaming_database_file = os.path.splitext(training_data)[0] + ".tst" # name of the tst training file
streaming_database_file = os.path.abspath(streaming_database_file)
hydra_file = args.filter_file
verbose = args.verbose
num_reads_per_core = args.reads_per_core
sensitive = args.sensitive
if not os.path.exists(streaming_database_file):
streaming_database_file = None
if args.plot_file:
plot_file = os.path.abspath(os.path.splitext(results_file)[0] + ".png")
# Import data and error checking
# Query file
if not os.path.exists(query_file):
raise Exception("Query file %s does not exist." % query_file)
if not os.path.exists(training_data):
raise Exception("Training/reference file %s does not exist." % training_data)
# Training data
if verbose:
print("Reading in sketches")
t0 = timeit.default_timer()
sketches = MH.import_multiple_from_single_hdf5(training_data)
if sketches[0]._kmers is None:
raise Exception(
"For some reason, the k-mers were not saved when the database was created. Try running MakeStreamingDNADatabase.py again.")
num_hashes = len(sketches[0]._kmers) # note: this is relying on the fact that the sketches were properly constructed
max_ksize = sketches[0].ksize
# adjust the k-range if necessary
k_range = [val for val in k_range if val <= max_ksize]
# adjust location of thresh if necessary
if location_of_thresh:
if location_of_thresh >= len(k_range):
print("Warning, k_range is of length %d, reducing location of threshold from %d to %d" % (len(k_range), location_of_thresh, len(k_range)))
location_of_thresh = len(k_range) - 1
# Get names of training files for use as rows in returned tabular data
training_file_names = []
for i in range(len(sketches)):
training_file_names.append(sketches[i].input_file_name)
if verbose:
print("Finished reading in sketches")
t1 = timeit.default_timer()
print("Time: %f" % (t1 - t0))
if verbose:
print("Reading in/creating ternary search tree")
t0 = timeit.default_timer()
# Make the Marissa tree
if streaming_database_file is None:
streaming_database_file = os.path.splitext(training_data)[0] + ".tst"
streaming_database_file = os.path.abspath(streaming_database_file)
print("It appears a tst training file has not been created (did you remember to use MakeStreamingDNADatabase.py?).")
print("I'm creating one anyway at: %s" % streaming_database_file)
print("This may take a while...")
to_insert = set()
for i in range(len(sketches)):
for kmer_index in range(len(sketches[i]._kmers)):
kmer = sketches[i]._kmers[kmer_index]
to_insert.add(kmer + 'x' + str(i) + 'x' + str(kmer_index)) # format here is kmer+x+hash_index+kmer_index
tree = mt.Trie(to_insert)
tree.save(streaming_database_file)
else:
tree = mt.Trie()
tree.load(streaming_database_file)
# all the k-mers of interest in a set (as a pre-filter)
if not hydra_file: # create one
try:
all_kmers_bf = WritingBloomFilter(len(sketches)*len(k_range)*num_hashes*2, 0.01)
for sketch in sketches:
for kmer in sketch._kmers:
for ksize in k_range:
all_kmers_bf.add(kmer[0:ksize]) # put all the k-mers and the appropriate suffixes in
all_kmers_bf.add(khmer.reverse_complement(kmer[0:ksize])) # also add the reverse complement
except IOError:
print("No such file or directory/error opening file: %s" % hydra_file)
sys.exit(1)
else: # otherwise read it in
try:
all_kmers_bf = ReadingBloomFilter(hydra_file)
except IOError:
print("No such file or directory/error opening file: %s" % hydra_file)
sys.exit(1)
if verbose:
print("Finished reading in/creating ternary search tree")
t1 = timeit.default_timer()
print("Time: %f" % (t1 - t0))
# Seen k-mers (set of k-mers that already hit the trie, so don't need to check again)
seen_kmers = set()
# shared object that will update the intersection counts
class Counters(object):
# This class is basically an array of counters (on the same basis as the sketches)
# it's used to keep track (in a parallel friendly way) of which streamed k-mers went into the training file sketches
def __init__(self):
pass
def return_matches(self, input_kmer, k_size_loc):
""" Get all the matches in the trie with the kmer prefix"""
match_info = set()
to_return = []
for kmer in [input_kmer, khmer.reverse_complement(input_kmer)]:
prefix_matches = tree.keys(kmer) # get all the k-mers whose prefix matches
#match_info = set()
# get the location of the found kmers in the counters
for item in prefix_matches:
split_string = item.split('x') # first is the hash location, second is which k-mer
hash_loc = int(split_string[1])
kmer_loc = int(split_string[2])
match_info.add((hash_loc, k_size_loc, kmer_loc))
#to_return = []
saw_match = False
if match_info:
saw_match = True
for tup in match_info:
to_return.append(tup)
return to_return, saw_match
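		# Editor's note (hedged illustration, not part of the original script):
		# the trie keys were built as kmer + 'x' + sketch_index + 'x' + kmer_index,
		# so a hypothetical key "ACGTA...x12x345" returned by tree.keys(kmer) is
		# parsed by the split('x') above into hash_loc=12 and kmer_loc=345 and
		# recorded as the tuple (12, k_size_loc, 345).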
def process_seq(self, seq):
# start with small kmer size, if see match, then continue looking for longer k-mer sizes, otherwise move on
small_k_size = k_range[0] # start with the small k-size
to_return = []
for i in range(len(seq) - small_k_size + 1): # look at all k-mers
kmer = seq[i:i + small_k_size]
possible_match = False
if kmer not in seen_kmers: # if we should process it
if kmer in all_kmers_bf: # if we should process it
match_list, saw_match = self.return_matches(kmer, 0)
if saw_match: # TODO: note, I *could* add all the trie matches and their sub-kmers to the seen_kmers
seen_kmers.add(kmer)
to_return.extend(match_list)
possible_match = True
# TODO: note: I could (since it'd only be for a single kmer size) keep a set of *all* small_kmers I've tried and use this as another pre-filter
else:
possible_match = True
# start looking at the other k_sizes, don't overhang len(seq)
if possible_match:
for other_k_size in [x for x in k_range[1:] if i+x <= len(seq)]:
kmer = seq[i:i + other_k_size]
if kmer in all_kmers_bf:
k_size_loc = k_range.index(other_k_size)
match_list, saw_match = self.return_matches(kmer, k_size_loc)
if saw_match:
to_return.extend(match_list)
else:
break
return to_return
# Initialize the counters
# TODO: note, I could be doing a partial dedup here, just to reduce the memory usage...
counter = Counters()
def map_func(sequence):
return counter.process_seq(sequence)
pool = multiprocessing.Pool(processes=num_threads)
if verbose:
print("Start streaming")
t0 = timeit.default_timer()
# populate the queue
fid = khmer.ReadParser(query_file) # This is faster than screed
match_tuples = []
#num_reads_per_core = 100000
num_reads_per_chunk = num_reads_per_core * num_threads
to_proc = [record.sequence for record in islice(fid, num_reads_per_chunk)]
i = 0
while to_proc:
i += len(to_proc)
if verbose:
print("Read in %d sequences" % i)
res = pool.map(map_func, to_proc, chunksize=int(max(1, min(num_reads_per_core, len(to_proc) / num_threads))))  # chunksize must be an int
flattened_res = [item for sublist in res if sublist for item in sublist]
flattened_res = list(set(flattened_res)) # dedup it
match_tuples.extend(flattened_res)
to_proc = [record.sequence for record in islice(fid, num_reads_per_chunk)]
fid.close()
#print(match_tuples)
if verbose:
print("Finished streaming")
t1 = timeit.default_timer()
print("Time: %f" % (t1 - t0))
if verbose:
print("Forming hit matrix")
t0 = timeit.default_timer()
#print("Len matches: %d" % len(match_tuples))
# create k_range sparse matrices. Rows are indexed by genomes (sketch/hash index), columns by k_mer_loc
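# each matrix ends up with shape (len(sketches), num_hashes); entry (i, j) is 1 if the j-th sketch k-mer of genome i, truncated to this k size, was observed in the stream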
row_ind_dict = dict()
col_ind_dict = dict()
value_dict = dict()
unique_kmers = dict() # this will keep track of the unique k-mers seen in each genome (sketch/hash loc)
for k_size in k_range:
row_ind_dict[k_size] = []
col_ind_dict[k_size] = []
value_dict[k_size] = []
match_tuples = set(match_tuples) # uniquify, so we don't make the row/col ind dicts too large
###############################################################
with open(os.path.splitext(results_file)[0] + "_match_tuples.txt", "w") as fid:
for hash_loc, k_size_loc, kmer_loc in match_tuples:
fid.write("%d\t%d\t%d\n" % (hash_loc, k_size_loc, kmer_loc))
################################################################
for hash_loc, k_size_loc, kmer_loc in match_tuples:
if hash_loc not in unique_kmers:
unique_kmers[hash_loc] = set()
k_size = k_range[k_size_loc]
kmer = sketches[hash_loc]._kmers[kmer_loc][:k_size]
if kmer not in unique_kmers[hash_loc]: # if you've seen this k-mer before, don't add it. NOTE: this makes sure we don't over count
row_ind_dict[k_size].append(hash_loc)
col_ind_dict[k_size].append(kmer_loc)
value_dict[k_size].append(1)
unique_kmers[hash_loc].add(kmer)
hit_matrices = []
for k_size in k_range:
mat = csc_matrix((value_dict[k_size], (row_ind_dict[k_size], col_ind_dict[k_size])), shape=(len(sketches), num_hashes))
hit_matrices.append(mat)
############################################################
save_npz(os.path.splitext(results_file)[0] + "_first_hit_matrix_%d.npz" % k_size, mat)
############################################################
if verbose:
print("Finished forming hit matrix")
t1 = timeit.default_timer()
print("Time: %f" % (t1 - t0))
if verbose:
print("Computing containment indicies")
t0 = timeit.default_timer()
containment_indices = np.zeros((len(sketches), len(k_range))) # TODO: could make this thing sparse, or do the filtering for above threshold here
for k_size_loc in range(len(k_range)):
containment_indices[:, k_size_loc] = (hit_matrices[k_size_loc].sum(axis=1).ravel()) #/float(num_hashes))
for k_size_loc in range(len(k_range)):
k_size = k_range[k_size_loc]
for hash_loc in np.where(containment_indices[:, k_size_loc])[0]: # find the genomes with non-zero containment
unique_kmers = set()
for kmer in sketches[hash_loc]._kmers:
unique_kmers.add(kmer[:k_size]) # find the unique k-mers
containment_indices[hash_loc, k_size_loc] /= float(len(unique_kmers)) # divide by the unique num of k-mers
if verbose:
print("Finished computing containment indicies")
t1 = timeit.default_timer()
print("Time: %f" % (t1 - t0))
############################################################
savemat(os.path.splitext(results_file)[0] + "_first_containment_indices.mat", {'containment_indices':containment_indices})
############################################################
results = dict()
for k_size_loc in range(len(k_range)):
ksize = k_range[k_size_loc]
key = 'k=%d' % ksize
results[key] = containment_indices[:, k_size_loc]
df = pd.DataFrame(results, list(map(os.path.basename, training_file_names)))
df = df.reindex(labels=['k=' + str(k_size) for k_size in k_range], axis=1) # sort columns in ascending order
sort_key = 'k=%d' % k_range[location_of_thresh]
max_key = 'k=%d' % k_range[-1]
filtered_results = df[df[sort_key] > coverage_threshold].sort_values(max_key, ascending=False)  # keep rows whose containment at the k-mer size given by location_of_thresh exceeds the coverage threshold, sorted by the largest k-mer size
if True:
if verbose:
print("Exporting results")
t0 = timeit.default_timer()
filtered_results.to_csv(results_file+"non_filtered.csv", index=True, encoding='utf-8')
# TODO: may not have to do this if I do the post-processing directly in here
# export the reduced hit matrices
# first, get the basis of the reduced data frame
to_select_names = list(filtered_results.index)
all_names = list(map(os.path.basename, training_file_names))  # list() so .index() works below
rows_to_select = []
for name in to_select_names:
rows_to_select.append(all_names.index(name))
hit_matrices_dict = dict()
# then reduce the hit matrix to this basis
for i in range(len(k_range)):
k_size = k_range[i]
hit_matrices_dict['k=%d' % k_size] = hit_matrices[i][rows_to_select, :]
# then export # TODO: not necessary if I do the post-processing right here
savemat(npz_file+"reduced_hit_matrix.npz", hit_matrices_dict, appendmat=False, do_compression=True)
# If requested, plot the results
if args.plot_file:
df = pd.read_csv(results_file) # annoyingly, I have to read it back in to get the format into something I can work with
dft = df.transpose()
fig = plt.figure()
for key in dft.keys():
plt.plot(k_range, dft[key].values[1:]) # [1:] since the first entry is the name of the file
plt.legend(dft.values[0])
plt.xlabel('K-mer size')
plt.ylabel('Containment Index')
fig.savefig(plot_file)
###############################################################################
if verbose and not sensitive:
print("Starting the post-processing")
t0 = timeit.default_timer()
if not sensitive:
# Do the post-processing
# Make the hit matrices dense
hit_matrices_dense_dict = dict()
for k_size in k_range:
hit_matrices_dense_dict['k=%d' % k_size] = hit_matrices_dict['k=%d' % k_size].todense()
hit_matrices_dict = hit_matrices_dense_dict
# get the count estimators of just the organisms of interest
CEs = MH.import_multiple_from_single_hdf5(training_data, import_list=to_select_names) # TODO: could make it a tad more memory efficient by sub-selecting the 'sketches'
all_kmers_with_counts = dict()
is_unique_kmer = set()
is_unique_kmer_per_ksize = dict()
for k_size in k_range:
is_unique_kmer_per_ksize[k_size] = set()
for i in range(len(CEs)):
for big_kmer in CEs[i]._kmers:
kmer = big_kmer[:k_size]
if kmer in all_kmers_with_counts:
all_kmers_with_counts[kmer] += 1
else:
all_kmers_with_counts[kmer] = 1
for kmer in all_kmers_with_counts.keys():
if all_kmers_with_counts[kmer] == 1:
k_size = len(kmer)
is_unique_kmer_per_ksize[k_size].add(kmer)
is_unique_kmer.add(kmer)
num_unique = dict()
for i in range(len(CEs)):
for k_size in k_range:
current_kmers = [k[:k_size] for k in CEs[i]._kmers]
current_kmers_set = set(current_kmers)
non_unique = set()
for kmer in current_kmers:
if kmer not in is_unique_kmer_per_ksize[k_size]:
non_unique.add(kmer)
to_zero_indices = [ind for ind, kmer in enumerate(current_kmers) if kmer in non_unique]
hit_matrices_dict['k=%d' % k_size][i, to_zero_indices] = 0  # set these to zero since they show up in other sketches (so not informative)
num_unique[i, k_range.index(k_size)] = len(current_kmers_set) - len(non_unique) # keep track of the size of the unique k-mers
# sum the modified hit matrices to get the size of the intersection
containment_indices = np.zeros((len(to_select_names), len(k_range))) # TODO: could make this thing sparse, or do the filtering for above threshold here
for k_size_loc in range(len(k_range)):
k_size = k_range[k_size_loc]
containment_indices[:, k_size_loc] = (
hit_matrices_dict['k=%d' % k_size].sum(axis=1).ravel()) # /float(num_hashes))
# then normalize by the number of unique k-mers (to get the containment index)
for k_size_loc in range(len(k_range)):
k_size = k_range[k_size_loc]
for hash_loc in np.where(containment_indices[:, k_size_loc])[0]: # find the genomes with non-zero containment
unique_kmers = set()
for kmer in CEs[hash_loc]._kmers:
unique_kmers.add(kmer[:k_size]) # find the unique k-mers
containment_indices[hash_loc, k_size_loc] /= float(
len(unique_kmers)) # TODO: this doesn't seem like the right way to normalize, but apparently it is!
# containment_indices[hash_loc, k_size_loc] /= float(num_unique[hash_loc, k_size_loc]) # divide by the unique num of k-mers
results = dict()
for k_size_loc in range(len(k_range)):
ksize = k_range[k_size_loc]
key = 'k=%d' % ksize
results[key] = containment_indices[:, k_size_loc]
df = pd.DataFrame(results, list(map(os.path.basename, to_select_names)))
df = df.reindex(labels=['k=' + str(k_size) for k_size in k_range], axis=1) # sort columns in ascending order
sort_key = 'k=%d' % k_range[location_of_thresh]
max_key = 'k=%d' % k_range[-1]
filtered_results = df[df[sort_key] > coverage_threshold].sort_values(max_key, ascending=False)  # keep rows whose containment at the k-mer size given by location_of_thresh exceeds the coverage threshold, sorted by the largest k-mer size
filtered_results.to_csv(results_file, index=True, encoding='utf-8')
if verbose:
t1 = timeit.default_timer()
print("Finished thresholding. Time: %f" % (t1 - t0))
#if verbose:
# print("Finished exporting results")
# t1 = timeit.default_timer()
# print("Time: %f" % (t1 - t0))
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/utils/tests/test_estimator_checks.py | 7 | 4395 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class ChangesDict(BaseEstimator):
def __init__(self):
self.key = 0
def fit(self, X, y=None):
X, y = check_X_y(X, y)
return self
def predict(self, X):
X = check_array(X)
self.key = 1000
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check that estimator state does not change
# at transform/predict/predict_proba time
msg = 'Estimator changes __dict__ during predict'
assert_raises_regex(AssertionError, msg, check_estimator, ChangesDict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| mit |
rajat1994/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
ningchi/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
Warvito/pydeeplearn | code/old-version/MNISTdigits.py | 3 | 9937 | """ This module is mainly created to test the deep belief and
rbm implementations on MNIST"""
__author__ = "Mihaela Rosca"
__contact__ = "[email protected]"
import argparse
import matplotlib.pyplot as plt
import numpy as np
import cPickle as pickle
import readmnist
import restrictedBoltzmannMachine as rbm
import deepbelief as db
import utils
import PCA
import glob
import DimensionalityReduction
from common import *
parser = argparse.ArgumentParser(description='RBM for digit recognition')
parser.add_argument('--save',dest='save',action='store_true', default=False,
help="if true, the network is serialized and saved")
parser.add_argument('--train',dest='train',action='store_true', default=False,
help=("if true, the network is trained from scratch from the"
"traning data"))
parser.add_argument('--pca', dest='pca',action='store_true', default=False,
help=("if true, the code for running PCA on the data is run"))
parser.add_argument('--rbm', dest='rbm',action='store_true', default=False,
help=("if true, the code for traning an rbm on the data is run"))
parser.add_argument('--rbmPCD', dest='rbmPCD',action='store_true', default=False,
help=("if true, the code for traning an rbm on the data is run"))
parser.add_argument('--db', dest='db',action='store_true', default=False,
help=("if true, the code for traning a deepbelief net on the"
"data is run"))
parser.add_argument('--trainSize', type=int, default=10000,
help='the number of training cases to be considered')
parser.add_argument('--testSize', type=int, default=1000,
help='the number of testing cases to be considered')
parser.add_argument('netFile', help="file where the serialized network should be saved")
parser.add_argument('--path',dest='path', default="MNIST", help="the path to the MNIST files")
# Get the arguments of the program
args = parser.parse_args()
def visualizeWeights(weights, imgShape, tileShape):
return utils.tile_raster_images(weights, imgShape,
tileShape, tile_spacing=(1, 1))
def rbmMain(reconstructRandom=True):
trainVectors, trainLabels =\
readmnist.read(0, args.trainSize, digits=None, bTrain=True, path=args.path)
testingVectors, testLabels =\
readmnist.read(0, args.testSize, digits=None, bTrain=False, path=args.path)
trainingScaledVectors = trainVectors / 255.0
testingScaledVectors = testingVectors / 255.0
# Train the network
if args.train:
# The number of hidden units is taken from a deep learning tutorial
# The data (the pixel values of the images) have to be normalized before being
# presented to the network
nrVisible = len(trainingScaledVectors[0])
nrHidden = 500
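# nrVisible is 784 for MNIST (the 28x28 pixel images are flattened); the 500 hidden units follow the tutorial mentioned above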
# use 1 dropout to test the rbm for now
net = rbm.RBM(nrVisible, nrHidden, rbm.contrastiveDivergence, 1, 1)
net.train(trainingScaledVectors)
t = visualizeWeights(net.weights.T, (28,28), (10,10))
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
t = pickle.load(f)
net = pickle.load(f)
f.close()
# Reconstruct an image and see that it actually looks like a digit
test = testingScaledVectors[0,:]
# get a random image and see what it looks like
if reconstructRandom:
test = np.random.random_sample(test.shape)
# Show the initial image first
plt.imshow(vectorToImage(test, (28,28)), cmap=plt.cm.gray)
plt.show()
# Show the reconstruction
recon = net.reconstruct(test.reshape(1, test.shape[0]))
plt.imshow(vectorToImage(recon, (28,28)), cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('1.png', transparent=True)
# plt.show()
# Show the weights and their form in a tile fashion
# Plot the weights
plt.imshow(t, cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('weights.png', transparent=True)
print "done"
if args.save:
f = open(args.netFile, "wb")
pickle.dump(t, f)
pickle.dump(net, f)
def rbmMainPCD():
trainVectors, trainLabels =\
readmnist.read(0, args.trainSize, digits=None, bTrain=True, path=args.path)
testingVectors, testLabels =\
readmnist.read(0, args.testSize, digits=None,bTrain=False, path=args.path)
trainingScaledVectors = trainVectors / 255.0
testingScaledVectors = testingVectors / 255.0
# Train the network
if args.train:
# The number of hidden units is taken from a deep learning tutorial
# The data (the pixel values of the images) have to be normalized before being
# presented to the network
nrVisible = len(trainingScaledVectors[0])
nrHidden = 500
# use 1 dropout to test the rbm for now
# net = rbm.RBM(nrVisible, nrHidden, rbm.contrastiveDivergence, 1, 1)
net = rbm.RBM(nrVisible, nrHidden, rbm.PCD, 1, 1)
net.train(trainingScaledVectors)
t = visualizeWeights(net.weights.T, (28,28), (10,10))
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
t = pickle.load(f)
net = pickle.load(f)
f.close()
# Reconstruct a test image and see that it actually looks like a digit
test = testingScaledVectors[0,:]
plt.imshow(vectorToImage(test, (28,28)), cmap=plt.cm.gray)
plt.show()
recon = net.reconstruct(test.reshape(1, test.shape[0]))
plt.imshow(vectorToImage(recon, (28,28)), cmap=plt.cm.gray)
plt.show()
# Show the weights and their form in a tile fashion
plt.imshow(t, cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('weightsPCDall.png', transparent=True)
print "done"
if args.save:
f = open(args.netFile, "wb")
pickle.dump(t, f)
pickle.dump(net, f)
def shuffle(data, labels):
indexShuffle = np.random.permutation(len(data))
shuffledData = np.array([data[i] for i in indexShuffle])
shuffledLabels = np.array([labels[i] for i in indexShuffle])
return shuffledData, shuffledLabels
def pcaOnMnist(training, dimension=700):
principalComponents = PCA.pca(training, dimension)
low, same = PCA.reduce(principalComponents, training)
image2DInitial = vectorToImage(training[0], (28,28))
print same[0].shape
image2D = vectorToImage(same[0], (28,28))
plt.imshow(image2DInitial, cmap=plt.cm.gray)
plt.show()
plt.imshow(image2D, cmap=plt.cm.gray)
plt.show()
print "done"
def deepbeliefMNIST():
training = args.trainSize
testing = args.testSize
trainVectors, trainLabels =\
readmnist.read(0, training, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(0, testing, bTrain=False, path=args.path)
print trainVectors[0].shape
trainVectors, trainLabels = shuffle(trainVectors, trainLabels)
trainingScaledVectors = trainVectors / 255.0
testingScaledVectors = testVectors / 255.0
vectorLabels = labelsToVectors(trainLabels, 10)
if args.train:
# net = db.DBN(3, [784, 500, 10], [Sigmoid(), Softmax()])
# net = db.DBN(4, [784, 500, 500, 10], [Sigmoid, Sigmoid, Softmax])
net = db.DBN(5, [784, 1000, 1000, 1000, 10],
[Sigmoid, Sigmoid, Sigmoid, Softmax],
dropout=0.5, rbmDropout=0.5, visibleDropout=0.8,
rbmVisibleDropout=1)
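# layer sizes: 784 input pixels, three hidden layers of 1000 units each, and a 10-way softmax output (one unit per digit class)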
# TODO: think about what the network should do for 2 layers
net.train(trainingScaledVectors, vectorLabels)
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
net = pickle.load(f)
f.close()
probs, predicted = net.classify(testingScaledVectors)
correct = 0
for i in xrange(testing):
print "predicted"
print "probs"
print probs[i]
print predicted[i]
print "actual"
actual = testLabels[i]
print actual
correct += (predicted[i] == actual)
print "correct"
print correct
# for w in net.weights:
# print w
# for b in net.biases:
# print b
# t = visualizeWeights(net.weights[0].T, trainImages[0].(28, 28), (10,10))
# plt.imshow(t, cmap=plt.cm.gray)
# plt.show()
# print "done"
if args.save:
f = open(args.netFile, "wb")
pickle.dump(net, f)
f.close()
"""
Arguments:
big: should the big or small images be used?
folds: which folds should be used (1,..5) (a list). If None is passed all
folds are used
"""
def deepBeliefKanade(big=False, folds=None):
if big:
files = glob.glob('kanade_150*.pickle')
else:
files = glob.glob('kanade_f*.pickle')
if not folds:
folds = range(1, 6)
# Read the data from them. Sort out the files that do not have
# the folds that we want
# TODO: do this better (with regex in the file name)
# Do not rely on the order returned
files = [files[fold - 1] for fold in folds]  # pick out the requested folds (assumes glob returned them in fold order; see the TODO above)
data = []
labels = []
for filename in files:
with open(filename, "rb") as f:
# Sort out the labels from the data
dataAndLabels = pickle.load(f)
foldData = dataAndLabels[0:-1, :]
foldLabels = dataAndLabels[-1,:]
data.append(foldData)
labels.append(foldLabels)
# Do LDA
# Create the network
# Test
# You can also group the emotions into positive and negative to see
# if you can get better results (probably yes)
pass
# TODO: fix this (look at the ML coursework for it)
# Even better, use LDA
# think of normalizing them to 0.1 for pca as well
def pcaMain():
training = args.trainSize
testing = args.testSize
train, trainLabels =\
readmnist.read(0, training, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(0, testing, bTrain=False, path=args.path)
print train[0].shape
pcaOnMnist(train, dimension=100)
def main():
if args.db + args.pca + args.rbm + args.rbmPCD != 1:
raise Exception("You decide on one main method to run")
if args.db:
deepbeliefMNIST()
if args.pca:
pcaMain()
if args.rbmPCD:
rbmMainPCD()
if args.rbm:
rbmMain()
if __name__ == '__main__':
main()
| bsd-3-clause |
madjelan/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
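# 250x250 RGB noise stands in for a real LFW jpeg, which has the same dimensions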
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
chrisb13/imgtrkr | main.py | 1 | 4427 | #!/usr/bin/env python
# Author: Christopher Bull.
# Affiliation: Climate Change Research Centre and ARC Centre of Excellence for Climate System Science.
# Level 4, Mathews Building
# University of New South Wales
# Sydney, NSW, Australia, 2052
# Contact: [email protected]
# www: christopherbull.com.au
# Date created: Tue, 02 Feb 2016 16:02:13
# Machine created on: ccrc165
#
#see: https://github.com/docopt/docopt
#round brackets mean required square are optional
#download docopt from...
#https://raw.githubusercontent.com/docopt/docopt/master/docopt.py
"""
Tiny python package to help save metadata to a file such that you know what script created the file!
Usage:
main.py -h
main.py PNGPATH...
Options:
-h,--help : show this help message
PNGPATH : path to pngfile(s) you want the metadata for
Examples:
1] python main.py examples/test.png
2] See examples/pyeg.py
"""
from PIL import Image
from PIL import PngImagePlugin
import os
class AddTrkr(object):
"""
Class to add some image metadata to a PNG file
Parameters
----------
pngpath:
metadata (optional): whatever metadata you want to record to the file. If left blank, cpath is required; the name of the calling file, the machine it was run on and the current time are recorded, and the figure is also saved for you.
cpath (required if metadata={}, otherwise leave empty): specify path of file that called this function
Returns
-------
Notes
-------
Example
--------
>>> #using custom metadata
>>> import imgtrkr as it
>>> it.AddTrkr('/home/nfs/z3457920/hdrive/repos/test.png',{'moogy':'sdfasdf'})
>>>
>>> #using automated metadata (will save the png too!)
>>> import imgtrkr as it
>>> it.AddTrkr('/home/nfs/z3457920/hdrive/repos/test.png',{},cpath=os.path.realpath(__file__))
"""
def __init__(self, pngpath,metadata={},cpath=''):
#super(AddTrkr, self).__init__()
self.pngpath = pngpath
self.metadata = metadata
self.cpath = cpath
self.addmet()
def addmet(self):
"""function that actually adds the metadata
:returns: @todo
"""
#f = "test.png"
#METADATA = {"version":"1.0", "OP":"ihuston"}
## Create a sample image
#import pylab as plt
#import numpy as np
#X = np.random.random((50,50))
#plt.imshow(X)
#plt.savefig(f)
# Use PIL to save some image metadata
#adding custom metadata
if self.metadata!={}:
im = Image.open(self.pngpath)
meta = PngImagePlugin.PngInfo()
for x in self.metadata:
meta.add_text(x, self.metadata[x])
im.save(self.pngpath, "png", pnginfo=meta)
else:
import matplotlib.pyplot as plt
import datetime
import socket
self.metadata={'Created with':self.cpath,'time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),'machine':socket.gethostname()}
plt.savefig(self.pngpath,dpi=300)
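# savefig writes the currently active matplotlib figure; the file is then re-opened with PIL below so the metadata text chunks can be attached to it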
im = Image.open(self.pngpath)
meta = PngImagePlugin.PngInfo()
for x in self.metadata:
meta.add_text(x, self.metadata[x])
im.save(self.pngpath, "png", pnginfo=meta)
return
class RdTrkr(object):
"""
Class to add some image metadata too
Parameters
----------
pngpath:
metadata:
Returns
-------
Notes
-------
Example
--------
>>> import imgtrkr as it
>>> it.RdTrkr('/home/nfs/z3457920/hdrive/repos/test.png')
"""
def __init__(self, pngpath):
self.pngpath = pngpath
im2 = Image.open(self.pngpath)
#_lg.info(im2.info)
# import pdb; pdb.set_trace()
_lg.info("File: "+os.path.basename(self.pngpath)+" has metadata: "+str(im2.info))
# print im2.info
if __name__ == "__main__":
from docopt import docopt
arguments = docopt(__doc__)
from _cblogger import _LogStart
_lg=_LogStart().setup()
# handle one or more png paths passed on the command line
if len(arguments['PNGPATH'])==1:
RdTrkr(arguments['PNGPATH'][0])
elif len(arguments['PNGPATH'])>1:
for pngp in arguments['PNGPATH']:
RdTrkr(pngp)
else:
from ._cblogger import _LogStart
_lg=_LogStart().setup()
| gpl-3.0 |
justacec/bokeh | bokeh/charts/conftest.py | 12 | 1484 | """Defines chart-wide shared test fixtures."""
import numpy as np
import pandas as pd
import pytest
from bokeh.sampledata.autompg import autompg
class TestData(object):
"""Contains properties with easy access to data used across tests."""
def __init__(self):
self.cat_list = ['a', 'c', 'a', 'b']
self.list_data = [[1, 2, 3, 4], [2, 3, 4, 5]]
self.array_data = [np.array(item) for item in self.list_data]
self.dict_data = {'col1': self.list_data[0],
'col2': self.list_data[1]}
self.pd_data = pd.DataFrame(self.dict_data)
self.records_data = self.pd_data.to_dict(orient='records')
self.auto_data = autompg
self._setup_auto_mpg()
def _setup_auto_mpg(self):
# add a boolean column
self.auto_data['large_displ'] = self.auto_data['displ'] > 350
# add categorical column
cat = pd.Categorical.from_array(self.auto_data['cyl'])
new_order = list(reversed(sorted(cat.categories.values.tolist())))
self.auto_data['reversed_cyl'] = cat.reorder_categories(new_order)
@pytest.fixture(scope='module')
def test_data():
return TestData()
@pytest.fixture(scope='module')
def wide_data_with_cat(test_data):
data = test_data.dict_data.copy()
data['col3'] = test_data.cat_list
return data
@pytest.fixture(scope='module')
def df_with_cat_index(test_data):
return pd.DataFrame(test_data.dict_data, index=test_data.cat_list)
| bsd-3-clause |
cbmoore/statsmodels | statsmodels/sandbox/rls.py | 33 | 5179 | """Restricted least squares
from pandas
License: Simplified BSD
"""
from __future__ import print_function
import numpy as np
from statsmodels.regression.linear_model import WLS, GLS, RegressionResults
class RLS(GLS):
"""
Restricted general least squares model that handles linear constraints
Parameters
----------
endog: array-like
n length array containing the dependent variable
exog: array-like
n-by-p array of independent variables
constr: array-like
k-by-p array of linear constraints
param (0.): array-like or scalar
k-by-1 array (or scalar) of constraint parameters
sigma (None): scalar or array-like
The weighting matrix of the covariance. No scaling by default (OLS).
If sigma is a scalar, then it is converted into an n-by-n diagonal
matrix with sigma as each diagonal element.
If sigma is an n-length array, then it is assumed to be a diagonal
matrix with the given sigma on the diagonal (WLS).
Notes
-----
endog = exog * beta + epsilon
weights' * constr * beta = param
See Greene and Seaks, "The Restricted Least Squares Estimator:
A Pedagogical Note", The Review of Economics and Statistics, 1991.
"""
def __init__(self, endog, exog, constr, param=0., sigma=None):
N, Q = exog.shape
constr = np.asarray(constr)
if constr.ndim == 1:
K, P = 1, constr.shape[0]
else:
K, P = constr.shape
if Q != P:
raise Exception('Constraints and design do not align')
self.ncoeffs = Q
self.nconstraint = K
self.constraint = constr
if np.isscalar(param) and K > 1:
param = np.ones((K,)) * param
self.param = param
if sigma is None:
sigma = 1.
if np.isscalar(sigma):
sigma = np.ones(N) * sigma
sigma = np.squeeze(sigma)
if sigma.ndim == 1:
self.sigma = np.diag(sigma)
self.cholsigmainv = np.diag(np.sqrt(sigma))
else:
self.sigma = sigma
self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(self.sigma)).T
super(GLS, self).__init__(endog, exog)
_rwexog = None
@property
def rwexog(self):
"""Whitened exogenous variables augmented with restrictions"""
if self._rwexog is None:
P = self.ncoeffs
K = self.nconstraint
design = np.zeros((P + K, P + K))
design[:P, :P] = np.dot(self.wexog.T, self.wexog) #top left
constr = np.reshape(self.constraint, (K, P))
design[:P, P:] = constr.T #top right partition
design[P:, :P] = constr #bottom left partition
design[P:, P:] = np.zeros((K, K)) #bottom right partition
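# the assembled block matrix is the bordered system [[Xw'Xw, R'], [R, 0]] of the constrained normal equations; solving it against [Xw'yw, param] yields the restricted coefficients together with the Lagrange multipliers (see Greene and Seaks, 1991)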
self._rwexog = design
return self._rwexog
_inv_rwexog = None
@property
def inv_rwexog(self):
"""Inverse of self.rwexog"""
if self._inv_rwexog is None:
self._inv_rwexog = np.linalg.inv(self.rwexog)
return self._inv_rwexog
_rwendog = None
@property
def rwendog(self):
"""Whitened endogenous variable augmented with restriction parameters"""
if self._rwendog is None:
P = self.ncoeffs
K = self.nconstraint
response = np.zeros((P + K,))
response[:P] = np.dot(self.wexog.T, self.wendog)
response[P:] = self.param
self._rwendog = response
return self._rwendog
_ncp = None
@property
def rnorm_cov_params(self):
"""Parameter covariance under restrictions"""
if self._ncp is None:
P = self.ncoeffs
self._ncp = self.inv_rwexog[:P, :P]
return self._ncp
_wncp = None
@property
def wrnorm_cov_params(self):
"""
Heteroskedasticity-consistent parameter covariance
Used to calculate White standard errors.
"""
if self._wncp is None:
df = self.df_resid
pred = np.dot(self.wexog, self.coeffs)
eps = np.diag((self.wendog - pred) ** 2)
sigmaSq = np.sum(eps)
pinvX = np.dot(self.rnorm_cov_params, self.wexog.T)
self._wncp = np.dot(np.dot(pinvX, eps), pinvX.T) * df / sigmaSq
return self._wncp
_coeffs = None
@property
def coeffs(self):
"""Estimated parameters"""
if self._coeffs is None:
betaLambda = np.dot(self.inv_rwexog, self.rwendog)
self._coeffs = betaLambda[:self.ncoeffs]
return self._coeffs
def fit(self):
rncp = self.wrnorm_cov_params
lfit = RegressionResults(self, self.coeffs, normalized_cov_params=rncp)
return lfit
if __name__=="__main__":
import statsmodels.api as sm
dta = np.genfromtxt('./rlsdata.txt', names=True)
design = np.column_stack((dta['Y'],dta['Y']**2,dta[['NE','NC','W','S']].view(float).reshape(dta.shape[0],-1)))
design = sm.add_constant(design, prepend=True)
rls_mod = RLS(dta['G'],design, constr=[0,0,0,1,1,1,1])
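# the constraint row [0,0,0,1,1,1,1] forces the coefficients on the four regional dummies (NE, NC, W, S) to sum to param, which defaults to zero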
rls_fit = rls_mod.fit()
print(rls_fit.params)
| bsd-3-clause |
DGrady/pandas | pandas/compat/__init__.py | 4 | 10892 | """
compat
======
Cross-compatible functions for Python 2 and 3.
Key items to import for 2/3 compatible code:
* iterators: range(), map(), zip(), filter(), reduce()
* lists: lrange(), lmap(), lzip(), lfilter()
* unicode: u() [no unicode builtin in Python 3]
* longs: long (int in Python 3)
* callable
* iterable method compatibility: iteritems, iterkeys, itervalues
* Uses the original method if available, otherwise uses items, keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
* binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bind_method: binds functions to classes
* add_metaclass(metaclass) - class decorator that recreates class with with the
given metaclass instead (and avoids intermediary class creation)
Other items:
* platform checker
"""
# pylint disable=W0611
# flake8: noqa
import functools
import itertools
from distutils.version import LooseVersion
from itertools import product
import sys
import platform
import types
from unicodedata import east_asian_width
import struct
import inspect
from collections import namedtuple
PY2 = sys.version_info[0] == 2
PY3 = (sys.version_info[0] >= 3)
PY35 = (sys.version_info >= (3, 5))
PY36 = (sys.version_info >= (3, 6))
PYPY = (platform.python_implementation() == 'PyPy')
try:
import __builtin__ as builtins
# not writeable when instantiated with string, doesn't handle unicode well
from cStringIO import StringIO as cStringIO
# always writeable
from StringIO import StringIO
BytesIO = StringIO
import cPickle
import httplib
except ImportError:
import builtins
from io import StringIO, BytesIO
cStringIO = StringIO
import pickle as cPickle
import http.client as httplib
from pandas.compat.chainmap import DeepChainMap
if PY3:
def isidentifier(s):
return s.isidentifier()
def str_to_bytes(s, encoding=None):
return s.encode(encoding or 'ascii')
def bytes_to_str(b, encoding=None):
return b.decode(encoding or 'utf-8')
# The signature version below is directly copied from Django,
# https://github.com/django/django/pull/4846
def signature(f):
sig = inspect.signature(f)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
keywords = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
keywords = keywords[0] if keywords else None
defaults = [
p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
and p.default is not p.empty
] or None
argspec = namedtuple('Signature', ['args', 'defaults',
'varargs', 'keywords'])
return argspec(args, defaults, varargs, keywords)
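# the namedtuple mirrors the fields of the old inspect.getargspec result, so PY2 and PY3 callers can use attribute access (.args, .defaults, .varargs, .keywords) interchangeably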
# have to explicitly put builtins into the namespace
range = range
map = map
zip = zip
filter = filter
intern = sys.intern
reduce = functools.reduce
long = int
unichr = chr
# This was introduced in Python 3.3, but we don't support
# Python 3.x < 3.5, so checking PY3 is safe.
FileNotFoundError = FileNotFoundError
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
# Python 2
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
FileNotFoundError = IOError
def isidentifier(s, dotted=False):
return bool(_name_re.match(s))
def str_to_bytes(s, encoding='ascii'):
return s
def bytes_to_str(b, encoding='ascii'):
return b
def signature(f):
return inspect.getargspec(f)
# import iterator versions of these functions
range = xrange
intern = intern
zip = itertools.izip
filter = itertools.ifilter
map = itertools.imap
reduce = reduce
long = long
unichr = unichr
# Python 2-builtin ranges produce lists
lrange = builtins.range
lzip = builtins.zip
lmap = builtins.map
lfilter = builtins.filter
if PY2:
def iteritems(obj, **kw):
return obj.iteritems(**kw)
def iterkeys(obj, **kw):
return obj.iterkeys(**kw)
def itervalues(obj, **kw):
return obj.itervalues(**kw)
next = lambda it: it.next()
else:
def iteritems(obj, **kw):
return iter(obj.items(**kw))
def iterkeys(obj, **kw):
return iter(obj.keys(**kw))
def itervalues(obj, **kw):
return iter(obj.values(**kw))
next = next
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has bound/unbound method issue
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
# ----------------------------------------------------------------------------
# functions largely based / taken from the six module
# Much of the code in this module comes from Benjamin Peterson's six library.
# The license for this library can be found in LICENSES/SIX and the code can be
# found at https://bitbucket.org/gutworth/six
# Definition of East Asian Width
# http://unicode.org/reports/tr11/
# Ambiguous width can be changed by option
_EAW_MAP = {'Na': 1, 'N': 1, 'W': 2, 'F': 2, 'H': 1}
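# e.g. east_asian_len(u'パンダ') == 6: each of the three katakana characters is category 'W' (wide) and counts as two columns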
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
def u(s):
return s
def u_safe(s):
return s
def strlen(data, encoding=None):
# encoding is for compat with PY2
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, text_type):
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
def import_lzma():
""" import lzma from the std library """
import lzma
return lzma
def set_function_name(f, name, cls):
""" Bind the name/qualname attributes of the function """
f.__name__ = name
f.__qualname__ = '{klass}.{name}'.format(
klass=cls.__name__,
name=name)
f.__module__ = cls.__module__
return f
ResourceWarning = ResourceWarning
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
def u(s):
return unicode(s, "unicode_escape")
def u_safe(s):
try:
return unicode(s, "unicode_escape")
except:
return s
def strlen(data, encoding=None):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, text_type):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
def import_lzma():
""" import the backported lzma library
or raise ImportError if not available """
from backports import lzma
return lzma
def set_function_name(f, name, cls):
""" Bind the name attributes of the function """
f.__name__ = name
return f
class ResourceWarning(Warning):
pass
string_and_binary_types = string_types + (binary_type,)
try:
# callable reintroduced in later versions of Python
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
for slots_var in orig_vars.get('__slots__', ()):
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
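# Illustrative usage sketch (not part of the original module; the metaclass and
# class below are hypothetical):
#
#     class _Meta(type):
#         def __new__(mcls, name, bases, ns):
#             ns.setdefault('tag', name.lower())
#             return super(_Meta, mcls).__new__(mcls, name, bases, ns)
#
#     @add_metaclass(_Meta)
#     class _Widget(object):
#         pass
#
#     _Widget.tag  # -> '_widget', regardless of Python version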
from collections import OrderedDict, Counter
if PY3:
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
else:
# this version of raise is a syntax error in Python 3
exec("""
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
""")
raise_with_traceback.__doc__ = """Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
# http://stackoverflow.com/questions/4126348
# Thanks to @martineau at SO
from dateutil import parser as _date_parser
import dateutil
if LooseVersion(dateutil.__version__) < '2.0':
@functools.wraps(_date_parser.parse)
def parse_date(timestr, *args, **kwargs):
timestr = bytes(timestr)
return _date_parser.parse(timestr, *args, **kwargs)
elif PY2 and LooseVersion(dateutil.__version__) == '2.0':
# dateutil brokenness
raise Exception('dateutil 2.0 incompatible with Python 2.x, you must '
'install version 1.5 or 2.1+!')
else:
parse_date = _date_parser.parse
# https://github.com/pandas-dev/pandas/pull/9123
def is_platform_little_endian():
""" am I little endian """
return sys.byteorder == 'little'
def is_platform_windows():
return sys.platform == 'win32' or sys.platform == 'cygwin'
def is_platform_linux():
return sys.platform == 'linux2'
def is_platform_mac():
return sys.platform == 'darwin'
def is_platform_32bit():
return struct.calcsize("P") * 8 < 64
| bsd-3-clause |
nicolargo/intellij-community | python/helpers/pydev/pydev_ipython/qt_for_kernel.py | 67 | 2337 | """ Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
mpqt)
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
if os.environ.get('QT_API', None) is None:
#no ETS variable. Ask mpl, then use either
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE]
#ETS variable present. Will fallback to external.qt
return None
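# Illustrative sketch (not part of the original module): with no Qt binding
# imported yet, no matplotlib preference and QT_API unset, the call below would
# typically see
#
#     get_options()  # -> [QT_API_PYQT_DEFAULT, QT_API_PYSIDE]
#
# i.e. "try PyQt4 with the v1 API first, then fall back to PySide".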
api_opts = get_options()
if api_opts is not None:
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
| apache-2.0 |
mdhaber/scipy | scipy/stats/kde.py | 5 | 21517 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to SciPy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
# Standard library imports.
import warnings
# SciPy imports.
from scipy import linalg, special
from scipy.special import logsumexp
from scipy._lib._util import check_random_state
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi,
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
ones, cov)
import numpy as np
# Local imports.
from . import mvn
from ._stats import gaussian_kernel_estimate
__all__ = ['gaussian_kde']
class gaussian_kde:
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`. The square
of `kde.factor` multiplies the covariance matrix of the data in the kde
estimation.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
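A weighted estimate can be built the same way; the weights below are
arbitrary and purely illustrative:
>>> w = np.random.uniform(size=m1.shape)
>>> kernel_w = stats.gaussian_kde(values, weights=w)
>>> Zw = np.reshape(kernel_w(positions).T, X.shape)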
"""
def __init__(self, dataset, bw_method=None, weights=None):
self.dataset = atleast_2d(asarray(dataset))
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
self._weights = atleast_1d(weights).astype(float)
self._weights /= sum(self._weights)
if self.weights.ndim != 1:
raise ValueError("`weights` input should be one-dimensional.")
if len(self._weights) != self.n:
raise ValueError("`weights` input should be of length n")
self._neff = 1/sum(self._weights**2)
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(asarray(points))
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
output_dtype = np.common_type(self.covariance, points)
itemsize = np.dtype(output_dtype).itemsize
if itemsize == 4:
spec = 'float'
elif itemsize == 8:
spec = 'double'
elif itemsize in (12, 16):
spec = 'long double'
else:
raise TypeError('%s has unexpected item size %d' %
(output_dtype, itemsize))
result = gaussian_kernel_estimate[spec](self.dataset.T, self.weights[:, None],
points.T, self.inv_cov, output_dtype)
return result[:, 0]
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies)*self.weights, axis=0) / norm_const
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
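Examples
--------
A small illustrative sketch (the data below are arbitrary):
>>> from scipy import stats
>>> kde = stats.gaussian_kde([-1.0, 0.0, 0.5, 2.0])
>>> p = kde.integrate_box_1d(-1.0, 1.0)
>>> 0.0 < p < 1.0
True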
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.sum(self.weights*(
special.ndtr(normalized_high) -
special.ndtr(normalized_low)))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun_weighted(low_bounds, high_bounds,
self.dataset, self.weights,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const
return result
def resample(self, size=None, seed=None):
"""Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the effective number of samples in the underlying
dataset.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
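Examples
--------
A minimal illustrative sketch (the input data are random by construction):
>>> from scipy import stats
>>> kde = stats.gaussian_kde(np.random.normal(size=200))
>>> draws = kde.resample(size=50, seed=1234)
>>> draws.shape
(1, 50)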
"""
if size is None:
size = int(self.neff)
random_state = check_random_state(seed)
norm = transpose(random_state.multivariate_normal(
zeros((self.d,), float), self.covariance, size=size
))
indices = random_state.choice(self.n, size=size, p=self.weights)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
"""Compute Scott's factor.
Returns
-------
s : float
Scott's factor.
"""
return power(self.neff, -1./(self.d+4))
def silverman_factor(self):
"""Compute the Silverman factor.
Returns
-------
s : float
The Silverman factor.
"""
return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, str):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
bias=False,
aweights=self.weights))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
L = linalg.cholesky(self.covariance*2*pi)
self.log_det = 2*np.log(np.diag(L)).sum()
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
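Notes
-----
This is mathematically equivalent to ``np.log(kde.evaluate(x))`` but is
computed with ``logsumexp`` for better numerical stability.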
"""
points = atleast_2d(x)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
if m >= self.n:
# there are more points than data, so loop over data
energy = np.empty((self.n, m), dtype=float)
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy[i] = sum(diff*tdiff, axis=0)
log_to_sum = 2.0 * np.log(self.weights) - self.log_det - energy.T
result = logsumexp(0.5 * log_to_sum, axis=1)
else:
# loop over points
result = np.empty((m,), dtype=float)
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0)
log_to_sum = 2.0 * np.log(self.weights) - self.log_det - energy
result[i] = logsumexp(0.5 * log_to_sum)
return result
@property
def weights(self):
try:
return self._weights
except AttributeError:
self._weights = ones(self.n)/self.n
return self._weights
@property
def neff(self):
try:
return self._neff
except AttributeError:
self._neff = 1/sum(self.weights**2)
return self._neff
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 110 | 3768 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
francisco-dlp/hyperspy | hyperspy/tests/signal/test_2D_tools.py | 3 | 6247 | # Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
import numpy.testing as npt
import numpy as np
from scipy.misc import face, ascent
from scipy.ndimage import fourier_shift
import pytest
import hyperspy.api as hs
from hyperspy.decorators import lazifyTestClass
def _generate_parameters():
parameters = []
for normalize_corr in [False, True]:
for reference in ['current', 'cascade', 'stat']:
parameters.append([normalize_corr, reference])
return parameters
@lazifyTestClass
class TestSubPixelAlign:
def setup_method(self, method):
ref_image = ascent()
center = np.array((256, 256))
shifts = np.array([(0.0, 0.0), (4.3, 2.13), (1.65, 3.58),
(-2.3, 2.9), (5.2, -2.1), (2.7, 2.9),
(5.0, 6.8), (-9.1, -9.5), (-9.0, -9.9),
(-6.3, -9.2)])
s = hs.signals.Signal2D(np.zeros((10, 100, 100)))
for i in range(10):
# Apply each sub-pixel shift using FFT and inverse FFT
offset_image = fourier_shift(np.fft.fftn(ref_image), shifts[i])
offset_image = np.fft.ifftn(offset_image).real
# Crop central regions of shifted images to avoid wrap around
s.data[i, ...] = offset_image[center[0]:center[0] + 100,
center[1]:center[1] + 100]
self.signal = s
self.shifts = shifts
def test_align_subpix(self):
# Align signal
s = self.signal
shifts = self.shifts
s.align2D(shifts=shifts)
# Compare by broadcasting
np.testing.assert_allclose(s.data[4], s.data[0], rtol=0.5)
@pytest.mark.parametrize(("normalize_corr", "reference"),
_generate_parameters())
def test_estimate_subpix(self, normalize_corr, reference):
s = self.signal
shifts = s.estimate_shift2D(sub_pixel_factor=200,
normalize_corr=normalize_corr)
np.testing.assert_allclose(shifts, self.shifts, rtol=0.2, atol=0.2,
verbose=True)
@pytest.mark.parametrize(("plot"), [True, 'reuse'])
def test_estimate_subpix_plot(self, plot):
# To avoid this function plotting many figures and holding the test, we
# make sure the backend is set to `agg` in case it is set to something
# else in the testing environment
import matplotlib.pyplot as plt
plt.switch_backend('agg')
s = self.signal
s.estimate_shift2D(sub_pixel_factor=200, plot=plot)
@lazifyTestClass
class TestAlignTools:
def setup_method(self, method):
im = face(gray=True)
self.ascent_offset = np.array((256, 256))
s = hs.signals.Signal2D(np.zeros((10, 100, 100)))
self.scales = np.array((0.1, 0.3))
self.offsets = np.array((-2, -3))
izlp = []
for ax, offset, scale in zip(
s.axes_manager.signal_axes, self.offsets, self.scales):
ax.scale = scale
ax.offset = offset
izlp.append(ax.value2index(0))
self.izlp = izlp
self.ishifts = np.array([(0, 0), (4, 2), (1, 3), (-2, 2), (5, -2),
(2, 2), (5, 6), (-9, -9), (-9, -9), (-6, -9)])
self.new_offsets = self.offsets - self.ishifts.min(0) * self.scales
zlp_pos = self.ishifts + self.izlp
for i in range(10):
slices = self.ascent_offset - zlp_pos[i, ...]
s.data[i, ...] = im[slices[0]:slices[0] + 100,
slices[1]:slices[1] + 100]
self.signal = s
# How the image should look after successful alignment
smin = self.ishifts.min(0)
smax = self.ishifts.max(0)
offsets = self.ascent_offset + self.offsets / self.scales - smin
size = np.array((100, 100)) - (smax - smin)
self.aligned = im[int(offsets[0]):int(offsets[0] + size[0]),
int(offsets[1]):int(offsets[1] + size[1])]
def test_estimate_shift(self):
s = self.signal
shifts = s.estimate_shift2D()
print(shifts)
print(self.ishifts)
assert np.allclose(shifts, self.ishifts)
def test_align(self):
# Align signal
m = mock.Mock()
s = self.signal
s.events.data_changed.connect(m.data_changed)
s.align2D()
# Compare by broadcasting
assert np.all(s.data == self.aligned)
assert m.data_changed.called
def test_align_expand(self):
s = self.signal
s.align2D(expand=True)
# Check the numbers of NaNs to make sure expansion happened properly
ds = self.ishifts.max(0) - self.ishifts.min(0)
Nnan = np.sum(ds) * 100 + np.prod(ds)
Nnan_data = np.sum(1 * np.isnan(s.data), axis=(1, 2))
# Due to interpolation, the number of NaNs in the data might
# be 2 higher (left and right side) than expected
assert np.all(Nnan_data - Nnan <= 2)
# Check alignment is correct
d_al = s.data[:, ds[0]:-ds[0], ds[1]:-ds[1]]
assert np.all(d_al == self.aligned)
def test_add_ramp():
s = hs.signals.Signal2D(np.indices((3, 3)).sum(axis=0) + 4)
s.add_ramp(-1, -1, -4)
npt.assert_allclose(s.data, 0)
def test_add_ramp_lazy():
s = hs.signals.Signal2D(np.indices((3, 3)).sum(axis=0) + 4).as_lazy()
s.add_ramp(-1, -1, -4)
npt.assert_almost_equal(s.data.compute(), 0)
if __name__ == '__main__':
import pytest
pytest.main(__name__)
| gpl-3.0 |
egorburlakov/CrisisModelingPython | Modeling.py | 1 | 9423 | import numpy as np
import networkx as nx
import pandas as pd
import bisect #for insort_left
import time
from datetime import datetime
import matplotlib.pyplot as plt
from CrisisModel import Crisis
from OrgModel import Org
cols = ["NInf", "NSigs", "NSigCaught", "NMissed", "NDecMade", "ImpCaught", "ImpMissed", "ImpDecMade", "NEmps", "MinSpan", "MaxSpan", "MaxLev", "CrT", "EmpT", "TpT"]
rec = {"NInf" : [], "NSigs" : [], "NSigCaught" : [], "NMissed" : [], "NDecMade" : [], "ImpCaught" : [], "ImpMissed" : [], "ImpDecMade" : [], "NEmps" : [],
"MinSpan" : [], "MaxSpan" : [], "MaxLev" : [], "CrT" : [], "EmpT" : [], "TpT" : []} #record on one experiment
class Event(object):
def __init__(self, t_e, sig_id_e, emp_id_e, s_st = 0): #stat - status of the signal to be processed
self.t = t_e #time when the event happens
self.sig_id, self.emp_id, self.s_stat = sig_id_e, emp_id_e, s_st
def __eq__(self, other):
return self.t == other.t
def __lt__(self, other):
return self.t < other.t
def __repr__(self):
return [self.t, self.sig_id, self.emp_id, self.s_stat].__repr__()
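# Illustrative sketch (not part of the original script): __eq__/__lt__ above
# order events by time, so the simulation can keep its event queue sorted with
# bisect, e.g.
#
#     queue = [Event(0.0, 0, 1), Event(5.0, 1, 2)]
#     bisect.insort_right(queue, Event(2.5, 2, 3))  # lands between the two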
def nextStat(o, emp, e, stat_n): #sets the employee's parameters ready for the next status
emp["t0"] = e.t + emp["t_proc"] + 1
emp["stat"] = stat_n
emp["t_proc"] = np.random.gamma(o.t_proc[stat_n], 1) #mean = var = o.t_proc[stat_n]
emp["sig_proc"] = e.sig_id if stat_n > 0 else -1
emp["t_proc_tot"] += emp["t_proc"]
return emp
def catchSig(e, cr, o, emp):
if emp["stat"] != 0: #If the employee is busy we wait until he is free
return Event(emp["t0"] + emp["t_proc"] + 1, e.sig_id, e.emp_id)
s0 = cr.Sigs[e.sig_id]
if e.t > s0["app"] + s0["dapp"]: #if signal disappears
imp_caught = s0["imp"]
if imp_caught >= cr.noise_th:
rec["NMissed"][-1] += 1 #statistics
rec["ImpMissed"][-1] += imp_caught
return []
if np.random.choice(2, 1, p = [1 - cr.av, cr.av]): #otherwise he tries to catch the signal
e1 = Event(e.t + emp["t_proc"] + 1, e.sig_id, e.emp_id, emp["stat"] + 1) #and moves to the next status if succeeds
emp = nextStat(o, emp, e, emp["stat"] + 1)
imp_caught = s0["imp"]
if imp_caught >= cr.noise_th:
rec["NSigCaught"][-1] += 1 #statistics
rec["ImpCaught"][-1] += imp_caught
return e1
else:
return Event(e.t + np.random.gamma(o.t_proc[0]), e.sig_id, e.emp_id) #if he doesn't succeed in catching it, he tries again at the next time step, provided the signal is still available
def evalSig(e, cr, o, emp):
if (emp["sig_proc"] >= 0) & (emp["sig_proc"] != e.sig_id): #If the employee is busy we wait until he is free
return Event(emp["t0"] + emp["t_proc"] + 1, e.sig_id, e.emp_id, 1)
s0 = cr.Sigs[e.sig_id]
if s0["imp_eval"]: #if someone has already assessed the signal
imp = min(max(np.random.normal(s0["imp_eval"][-1], o.var_imp_eval), 0), 1)
else:
imp = min(max(np.random.normal(s0["imp"], o.var_imp_eval), 0), 1)
s0["imp_eval"].append(imp) #update the list of evaluations from employees
if imp > o.s2: #transfer signal
if s0["ag"] == e.emp_id: #if it's the decision maker to preparation, otherwise - to transferring
e1 = Event(e.t + emp["t_proc"] + 1, e.sig_id, e.emp_id, 4)
emp = nextStat(o, emp, e, 4)
else:
e1 = Event(e.t + emp["t_proc"] + 1, e.sig_id, e.emp_id, emp["stat"] + 1)
emp = nextStat(o, emp, e, emp["stat"] + 1)
return e1
elif imp >= o.s1: #active monitoring
e1 = Event(e.t + emp["t_proc"] + 1, e.sig_id, e.emp_id, 3)
emp = nextStat(o, emp, e, 3)
return e1
else: #don't process anymore
emp = nextStat(o, emp, e, 0)
return []
def transSig(e, cr, o, emp):
emp_n = nx.shortest_path(o.g, e.emp_id, cr.Sigs[e.sig_id]["ag"])[1] #next agent in the path, might be the same as the previous one if he is a decision maker
e1 = Event(e.t + emp["t_proc"] + 1, e.sig_id, emp_n, 1) #catch or evaluate?
emp = nextStat(o, emp, e, 0) #the employee returns to monitoring status
return e1
def actmonSig(e, cr, o, emp):
s0 = cr.Sigs[e.sig_id]
imp = s0["imp"] # the employees knows the true importance
s0["imp_eval"].append(imp) #update the list of evaluations from employees
if imp >= (o.s2 + o.s1) / 2: #transfer signal
if s0["ag"] == e.emp_id: #if it's the target agent - preparation, otherwise - transferring
e1 = Event(e.t + emp["t_proc"] + 1, e.sig_id, e.emp_id, 4)
emp = nextStat(o, emp, e, 4)
else:
e1 = Event(e.t + emp["t_proc"] + 1, e.sig_id, e.emp_id, 2)
emp = nextStat(o, emp, e, 2)
return e1
else: #don't process anymore
emp = nextStat(o, emp, e, 0)
return []
def prepSig(e, cr, o, emp):
emp = nextStat(o, emp, e, 0)
imp_caught = cr.Sigs[e.sig_id]["imp"]
if imp_caught >= cr.noise_th:
rec["NDecMade"][-1] += 1 #statistics
rec["ImpDecMade"][-1] += imp_caught
return []
handleEvent = {0 : catchSig, 1 : evalSig, 2 : transSig, 3 : actmonSig, 4 : prepSig, }
def runModeling(cr, o):
e = [Event(s[1]["app"], s[0], s[1]["inf"]) for s in cr.Sigs.items()] #initializing events
e.sort()
e.append(Event(e[-1].t + cr.Sigs[e[-1].sig_id]["dapp"] + 1, -1, -1)) #adding terminal event
while e:
e1 = e.pop(0)
if(e1.sig_id >= 0):
# print "{} {} {}".format(e1.s_stat, "Start", time.time() - start)
e1 = handleEvent[e1.s_stat](e1, cr, o, o.g.node[e1.emp_id])
if e1:
# print "{} {} {}".format(e1.s_stat, "Event handled", time.time() - start)
bisect.insort_right(e, e1)
if gen_pars["VizOrg"] & (e1.s_stat > 0): o.visualizeGraph()
else:
e = []
return e1.t
def runExperiments():
for i in xrange(gen_pars["NExp"]):
#generate org
nemps = np.random.randint(org_pars[gen_pars["Scen"]]["NEmps_min"], org_pars[gen_pars["Scen"]]["NEmps_max"] + 1)
rec["NEmps"].append(nemps)
min_span = org_pars[gen_pars["Scen"]]["SpanMin"]
max_span = org_pars[gen_pars["Scen"]]["SpanMax"]
rec["MinSpan"].append(min_span)
rec["MaxSpan"].append(max_span)
o = Org(nemps, min_span, max_span)
if gen_pars["VizOrg"]: o.visualizeGraph()
rec["MaxLev"].append(o.max_lev)
#generate crisis
nsigs = np.random.randint(cr_pars["NSig_min"], cr_pars["NSig_max"] + 1)
imp = np.random.choice(cr_pars["Imp_modes"], p = p_v, size = 1)
rec["NSigs"].append(nsigs)
for key in ["NSigCaught", "NMissed", "NDecMade", "ImpCaught", "ImpMissed", "ImpDecMade"]: #initialize stats
rec[key].append(0)
cr = Crisis(nsigs, cr_pars["AppT"], cr_pars["DappT"], imp, o) #nsigs, app, dapp, imp
rec["NInf"].append(len(cr.Sigs))
rec["CrT"].append(runModeling(cr, o)) # <-------- run modeling
if i % 100 == 0: print "{} {} {} {} {} {}".format("Modeling #", i, "lasted for", rec["CrT"][-1], "and took", time.time() - start)
for key in ["ImpCaught", "ImpMissed", "ImpDecMade"]: #normalize stats
rec[key][-1] /= cr.imp_tot
rec["EmpT"].append(np.sum([o.g.node.items()[i][1]["t_proc_tot"] for i in xrange(o.g.number_of_nodes())])) #summing up all t_proc_tot for all employees
rec["TpT"].append(o.g.node[0]["t_proc_tot"])
def vizVector(p, sb_plot, col, x_lab = "", y_lab = "ImpCaught", hist = False):
ax = fig.add_subplot(sb_plot)
if hist:
plt.hist(p, color = col)
else:
plt.plot(p.index, p, linestyle = "--", marker = "o", color = col)
ax.set_ylabel(y_lab)
ax.set_xlabel(x_lab, fontsize = 12)
plt.grid()
#########################
#Run Experiment
#########################
#experiment details
gen_pars = {"Seed" : 2, "Scen" : "SmallOrg", "NExp" : 100000, "ToCSV" : False, "VizOrg" : False}
org_pars = {"Nasa" : {"NEmps_min" : 22000, "NEmps_max" : 22500, "SpanMin" : 2, "SpanMax" : 8},
"SmallOrg" : {"NEmps_min" : 5, "NEmps_max" : 19, "SpanMin" : 2, "SpanMax" : 3},
"MiddleOrg" : {"NEmps_min" : 100, "NEmps_max" : 250, "SpanMin" : 2, "SpanMax" : 4},
"BigOrg" : {"NEmps_min" : 250, "NEmps_max" : 1000, "SpanMin" : 2, "SpanMax" : 5}}
cr_pars = {"NSig_min" : 5, "NSig_max" : 11, "AppT" : 16, "DappT" : 24, "Imp_modes" : [0.35, 0.5]}
p_v = np.ones(len(cr_pars["Imp_modes"])) / len(cr_pars["Imp_modes"])
#Run experiments
np.random.seed(gen_pars["Seed"])
start = time.time()
runExperiments()
st = pd.DataFrame(rec, columns = cols)
#print st
print "{} {}".format("Execution time is", (time.time() - start))
if gen_pars["ToCSV"]: st.to_csv("{}_{}_{}.{}".format("ModelingResults", gen_pars["Scne"], datetime.now().strftime("%Y-%m-%d %H_%M_%S"), "csv"), sep=';')
#########################
#Visualize results
#########################
fig = plt.figure(facecolor = "white")
#print st["ImpCaught"].groupby([st["NSigs"], rec["NEmps"]]).mean().unstack()
p1 = st["ImpCaught"].groupby([rec["NEmps"]]).mean()
vizVector(p1, 221, "g", "NEmps")
p2 = st["EmpT"].groupby([rec["NEmps"]]).mean()
vizVector(p2, 222, "r", "NEmps", "EmpT")
p3 = st["ImpCaught"].groupby([rec["MaxLev"]]).mean()
vizVector(p3, 223, "r", "MaxLev")
vizVector(st["ImpCaught"], 224, "r", "", "", True)
plt.show() | gpl-2.0 |
tillschumann/nest-simulator | topology/examples/test_3d.py | 13 | 2824 | # -*- coding: utf-8 -*-
#
# test_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
This example uses the function GetChildren, which is deprecated. A deprecation
warning is therefore issued. For details about deprecated functions, see
documentation.
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pylab.ion()
nest.ResetKernel()
# generate list of 1000 (x,y,z) triplets
pos = [[random.uniform(-0.5, 0.5), random.uniform(-0.5, 0.5),
random.uniform(-0.5, 0.5)]
for j in range(1000)]
l1 = topo.CreateLayer(
{'extent': [1.5, 1.5, 1.5], # must specify 3d extent AND center
'center': [0., 0., 0.],
'positions': pos,
'elements': 'iaf_psc_alpha'})
# visualize
# xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
# xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']
# l1_children is a work-around until NEST 3.0 is released
l1_children = nest.GetChildren(l1)[0]
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(l1_children))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')
# full connections in volume [-0.2,0.2]**3
topo.ConnectLayers(l1, l1,
{'connection_type': 'divergent', 'allow_autapses': False,
'mask': {'volume': {'lower_left': [-0.2, -0.2, -0.2],
'upper_right': [0.2, 0.2, 0.2]}}})
# show connections from center element
# sender shown in red, targets in green
ctr = topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')
tgts = topo.GetTargetNodes(ctr, l1)[0]
d = topo.Distance(ctr, tgts)
plt.figure()
plt.hist(d, 25)
# plt.show()
| gpl-2.0 |
dsullivan7/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 42 | 2894 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
dagar/Firmware | Tools/models/sdp3x_pitot_model.py | 8 | 3344 | """
Copyright (c) 2017, Sensirion AG
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Sensirion AG nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# formula for metal pitot tube with round tip as here: https://drotek.com/shop/2986-large_default/sdp3x-airspeed-sensor-kit-sdp31.jpg
# and tubing as provided by px4/drotek (1.5 mm diameter)
import numpy as np
import matplotlib.pyplot as plt
P_cal=96600. #Pa
P_amb=96600. #dummy-value, use absolute pressure sensor!!
## differential pressure, sensor values in Pascal
dp_SDP33_raw=np.linspace(0,80,100)
dp_SDP33=dp_SDP33_raw*P_cal/P_amb
## total length of tube in mm = length of dynamic port + length of static port; compensation only valid for inner diameter of 1.5 mm
l_tube=450
## density of air in kg/m3
rho_air=1.29
## flow through sensor
flow_SDP33=(300.805 - 300.878/(0.00344205*dp_SDP33**0.68698 + 1))*1.29/rho_air
## additional dp through pitot tube
dp_Pitot=(0.0032*flow_SDP33**2 + 0.0123*flow_SDP33+1.)*1.29/rho_air
## pressure drop through tube
dp_Tube=(flow_SDP33*0.674)/450*l_tube*rho_air/1.29
## speed at pitot-tube tip due to flow through sensor
dv=0.125*flow_SDP33
## sum of all pressure drops
dp_tot=dp_SDP33+dp_Tube+dp_Pitot
## computed airspeed without correction for inflow-speed at tip of pitot-tube
airspeed_uncorrected=np.sqrt(2*dp_tot/rho_air)
## corrected airspeed
airspeed_corrected=airspeed_uncorrected+dv
## just to compare to value without compensation
airspeed_raw=np.sqrt(2*dp_SDP33/rho_air)
plt.figure()
plt.plot(dp_SDP33,airspeed_corrected)
plt.xlabel('differential pressure raw value [Pa]')
plt.ylabel('airspeed_corrected [m/s]')
plt.show()
##plt.figure()
##plt.plot(dp_SDP33,airspeed_corrected/airspeed_raw)
##plt.xlabel('differential pressure raw value [Pa]')
##plt.ylabel('correction factor [-]')
##plt.show()
##
##
##
##plt.figure()
##plt.plot(airspeed_corrected,(airspeed_corrected-airspeed_raw)/airspeed_corrected)
##plt.xlabel('airspeed [m/s]')
##plt.ylabel('relative error [-]')
##plt.show() | bsd-3-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/ease/feature_extractor.py | 2 | 13908 | """
Extracts features from training set and test set essays
"""
import numpy
import re
import nltk
import sys
from sklearn.feature_extraction.text import CountVectorizer
import pickle
import os
from itertools import chain
import copy
import operator
import logging
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
from essay_set import EssaySet
import util_functions
if not base_path.endswith("/"):
base_path=base_path+"/"
log = logging.getLogger(__name__)
#Paths to needed data files
NGRAM_PATH = base_path + "data/good_pos_ngrams.p"
ESSAY_CORPUS_PATH = util_functions.ESSAY_CORPUS_PATH
class FeatureExtractor(object):
def __init__(self):
self._good_pos_ngrams = self.get_good_pos_ngrams()
self.dict_initialized = False
self._spell_errors_per_character=0
self._grammar_errors_per_character=0
def initialize_dictionaries(self, e_set, max_feats2 = 200):
"""
Initializes dictionaries from an essay set object
Dictionaries must be initialized prior to using this to extract features
e_set is an input essay set
returns a confirmation of initialization
"""
if(hasattr(e_set, '_type')):
if(e_set._type == "train"):
#normal text (unstemmed) useful words/bigrams
nvocab = util_functions.get_vocab(e_set._text, e_set._score, max_feats2 = max_feats2)
#stemmed and spell corrected vocab useful words/ngrams
svocab = util_functions.get_vocab(e_set._clean_stem_text, e_set._score, max_feats2 = max_feats2)
#dictionary trained on proper vocab
self._normal_dict = CountVectorizer(ngram_range=(1,2), vocabulary=nvocab)
#dictionary trained on proper vocab
self._stem_dict = CountVectorizer(ngram_range=(1,2), vocabulary=svocab)
self.dict_initialized = True
#Average spelling errors in set. needed later for spelling detection
self._mean_spelling_errors=sum(e_set._spelling_errors)/float(len(e_set._spelling_errors))
self._spell_errors_per_character=sum(e_set._spelling_errors)/float(sum([len(t) for t in e_set._text]))
#Gets the number and positions of grammar errors
good_pos_tags,bad_pos_positions=self._get_grammar_errors(e_set._pos,e_set._text,e_set._tokens)
self._grammar_errors_per_character=(sum(good_pos_tags)/float(sum([len(t) for t in e_set._text])))
#Generate bag of words features
bag_feats=self.gen_bag_feats(e_set)
#Sum of a row of bag of words features (topical words in an essay)
f_row_sum=numpy.sum(bag_feats[:,:])
#Average index of how "topical" essays are
self._mean_f_prop=f_row_sum/float(sum([len(t) for t in e_set._text]))
ret = "ok"
else:
raise util_functions.InputError(e_set, "needs to be an essay set of the train type.")
else:
raise util_functions.InputError(e_set, "wrong input. need an essay set object")
return ret
def get_good_pos_ngrams(self):
"""
Gets a set of grammatically correct part of speech sequences from an input file called essaycorpus.txt
Returns the set and caches the file
"""
if(os.path.isfile(NGRAM_PATH)):
good_pos_ngrams = pickle.load(open(NGRAM_PATH, 'rb'))
elif os.path.isfile(ESSAY_CORPUS_PATH):
essay_corpus = open(ESSAY_CORPUS_PATH).read()
essay_corpus = util_functions.sub_chars(essay_corpus)
good_pos_ngrams = util_functions.regenerate_good_tokens(essay_corpus)
pickle.dump(good_pos_ngrams, open(NGRAM_PATH, 'wb'))
else:
#Hard coded list in case the needed files cannot be found
good_pos_ngrams=['NN PRP', 'NN PRP .', 'NN PRP . DT', 'PRP .', 'PRP . DT', 'PRP . DT NNP', '. DT',
'. DT NNP', '. DT NNP NNP', 'DT NNP', 'DT NNP NNP', 'DT NNP NNP NNP', 'NNP NNP',
'NNP NNP NNP', 'NNP NNP NNP NNP', 'NNP NNP NNP .', 'NNP NNP .', 'NNP NNP . TO',
'NNP .', 'NNP . TO', 'NNP . TO NNP', '. TO', '. TO NNP', '. TO NNP NNP',
'TO NNP', 'TO NNP NNP']
return set(good_pos_ngrams)
def _get_grammar_errors(self,pos,text,tokens):
"""
Internal function to get the number of grammar errors in given text
pos - part of speech tagged text (list)
text - normal text (list)
tokens - list of lists of tokenized text
"""
word_counts = [max(len(t),1) for t in tokens]
good_pos_tags = []
min_pos_seq=2
max_pos_seq=4
bad_pos_positions=[]
for i in xrange(0, len(text)):
pos_seq = [tag[1] for tag in pos[i]]
pos_ngrams = util_functions.ngrams(pos_seq, min_pos_seq, max_pos_seq)
long_pos_ngrams=[z for z in pos_ngrams if z.count(' ')==(max_pos_seq-1)]
bad_pos_tuples=[[z,z+max_pos_seq] for z in xrange(0,len(long_pos_ngrams)) if long_pos_ngrams[z] not in self._good_pos_ngrams]
bad_pos_tuples.sort(key=operator.itemgetter(1))
to_delete=[]
for m in reversed(xrange(len(bad_pos_tuples)-1)):
start, end = bad_pos_tuples[m]
for j in xrange(m+1, len(bad_pos_tuples)):
lstart, lend = bad_pos_tuples[j]
if lstart >= start and lstart <= end:
bad_pos_tuples[m][1]=bad_pos_tuples[j][1]
to_delete.append(j)
fixed_bad_pos_tuples=[bad_pos_tuples[z] for z in xrange(0,len(bad_pos_tuples)) if z not in to_delete]
bad_pos_positions.append(fixed_bad_pos_tuples)
overlap_ngrams = [z for z in pos_ngrams if z in self._good_pos_ngrams]
if (len(pos_ngrams)-len(overlap_ngrams))>0:
divisor=len(pos_ngrams)/len(pos_seq)
else:
divisor=1
if divisor == 0:
divisor=1
good_grammar_ratio = (len(pos_ngrams)-len(overlap_ngrams))/divisor
good_pos_tags.append(good_grammar_ratio)
return good_pos_tags,bad_pos_positions
def gen_length_feats(self, e_set):
"""
Generates length based features from an essay set
Generally an internal function called by gen_feats
Returns an array of length features
e_set - EssaySet object
"""
text = e_set._text
lengths = [len(e) for e in text]
word_counts = [max(len(t),1) for t in e_set._tokens]
comma_count = [e.count(",") for e in text]
ap_count = [e.count("'") for e in text]
punc_count = [e.count(".") + e.count("?") + e.count("!") for e in text]
chars_per_word = [lengths[m] / float(word_counts[m]) for m in xrange(0, len(text))]
good_pos_tags,bad_pos_positions= self._get_grammar_errors(e_set._pos,e_set._text,e_set._tokens)
good_pos_tag_prop = [good_pos_tags[m] / float(word_counts[m]) for m in xrange(0, len(text))]
length_arr = numpy.array((
lengths, word_counts, comma_count, ap_count, punc_count, chars_per_word, good_pos_tags,
good_pos_tag_prop)).transpose()
return length_arr.copy()
def gen_bag_feats(self, e_set):
"""
Generates bag of words features from an input essay set and trained FeatureExtractor
Generally called by gen_feats
Returns an array of features
e_set - EssaySet object
"""
if(hasattr(self, '_stem_dict')):
sfeats = self._stem_dict.transform(e_set._clean_stem_text)
nfeats = self._normal_dict.transform(e_set._text)
bag_feats = numpy.concatenate((sfeats.toarray(), nfeats.toarray()), axis=1)
else:
raise util_functions.InputError(self, "Dictionaries must be initialized prior to generating bag features.")
return bag_feats.copy()
def gen_feats(self, e_set):
"""
Generates bag of words, length, and prompt features from an essay set object
returns an array of features
e_set - EssaySet object
"""
bag_feats = self.gen_bag_feats(e_set)
length_feats = self.gen_length_feats(e_set)
prompt_feats = self.gen_prompt_feats(e_set)
overall_feats = numpy.concatenate((length_feats, prompt_feats, bag_feats), axis=1)
overall_feats = overall_feats.copy()
return overall_feats
def gen_prompt_feats(self, e_set):
"""
Generates prompt based features from an essay set object and internal prompt variable.
Generally called internally by gen_feats
Returns an array of prompt features
e_set - EssaySet object
"""
prompt_toks = nltk.word_tokenize(e_set._prompt)
expand_syns = []
for word in prompt_toks:
synonyms = util_functions.get_wordnet_syns(word)
expand_syns.append(synonyms)
expand_syns = list(chain.from_iterable(expand_syns))
prompt_overlap = []
prompt_overlap_prop = []
for j in e_set._tokens:
tok_length=len(j)
if(tok_length==0):
tok_length=1
prompt_overlap.append(len([i for i in j if i in prompt_toks]))
prompt_overlap_prop.append(prompt_overlap[len(prompt_overlap) - 1] / float(tok_length))
expand_overlap = []
expand_overlap_prop = []
for j in e_set._tokens:
tok_length=len(j)
if(tok_length==0):
tok_length=1
expand_overlap.append(len([i for i in j if i in expand_syns]))
expand_overlap_prop.append(expand_overlap[len(expand_overlap) - 1] / float(tok_length))
prompt_arr = numpy.array((prompt_overlap, prompt_overlap_prop, expand_overlap, expand_overlap_prop)).transpose()
return prompt_arr.copy()
def gen_feedback(self, e_set, features=None):
"""
Generate feedback for a given set of essays
e_set - EssaySet object
features - optionally, pass in a matrix of features extracted from e_set using FeatureExtractor
in order to get off topic feedback.
Returns a list of lists (one list per essay in e_set)
"""
#Set ratio to modify thresholds for grammar/spelling errors
modifier_ratio=1.05
#Calc number of grammar and spelling errors per character
set_grammar,bad_pos_positions=self._get_grammar_errors(e_set._pos,e_set._text,e_set._tokens)
set_grammar_per_character=[set_grammar[m]/float(len(e_set._text[m])+.1) for m in xrange(0,len(e_set._text))]
set_spell_errors_per_character=[e_set._spelling_errors[m]/float(len(e_set._text[m])+.1) for m in xrange(0,len(e_set._text))]
#Iterate through essays and create a feedback dict for each
all_feedback=[]
for m in xrange(0,len(e_set._text)):
#Be very careful about changing these messages!
individual_feedback={'grammar' : "Grammar: Ok.",
'spelling' : "Spelling: Ok.",
'markup_text' : "",
'grammar_per_char' : set_grammar_per_character[m],
'spelling_per_char' : set_spell_errors_per_character[m],
'too_similar_to_prompt' : False,
}
markup_tokens=e_set._markup_text[m].split(" ")
#This loop ensures that sequences of bad grammar get put together into one sequence instead of staying
#disjointed
bad_pos_starts=[z[0] for z in bad_pos_positions[m]]
bad_pos_ends=[z[1]-1 for z in bad_pos_positions[m]]
for z in xrange(0,len(markup_tokens)):
if z in bad_pos_starts:
markup_tokens[z]='<bg>' + markup_tokens[z]
elif z in bad_pos_ends:
markup_tokens[z]=markup_tokens[z] + "</bg>"
if(len(bad_pos_ends)>0 and len(bad_pos_starts)>0 and len(markup_tokens)>1):
if max(bad_pos_ends)>(len(markup_tokens)-1) and max(bad_pos_starts)<(len(markup_tokens)-1):
markup_tokens[len(markup_tokens)-1]+="</bg>"
#Display messages if grammar/spelling errors greater than average in training set
if set_grammar_per_character[m]>(self._grammar_errors_per_character*modifier_ratio):
individual_feedback['grammar']="Grammar: More grammar errors than average."
if set_spell_errors_per_character[m]>(self._spell_errors_per_character*modifier_ratio):
individual_feedback['spelling']="Spelling: More spelling errors than average."
#Test topicality by calculating # of on topic words per character and comparing to the training set
#mean. Requires features to be passed in
if features is not None:
f_row_sum=numpy.sum(features[m,12:])
f_row_prop=f_row_sum/len(e_set._text[m])
if f_row_prop<(self._mean_f_prop/1.5) or len(e_set._text[m])<20:
individual_feedback['topicality']="Topicality: Essay may be off topic."
if(features[m,9]>.6):
individual_feedback['prompt_overlap']="Prompt Overlap: Too much overlap with prompt."
individual_feedback['too_similar_to_prompt']=True
log.debug(features[m,9])
#Create string representation of markup text
markup_string=" ".join(markup_tokens)
individual_feedback['markup_text']=markup_string
all_feedback.append(individual_feedback)
return all_feedback
| agpl-3.0 |
raghavrv/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 76 | 2055 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3,
random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
JelteF/statistics | 5/minerr_1.py | 1 | 1102 | import numpy as np
from scipy import stats
import matplotlib.pyplot as pl
import seaborn as sns # noqa
from itertools import count
from pylatex import Plt
P = [0.3, 0.7]
mu = [4, 7]
sigma = [1, 1.5]
# The second sigma is 1.5 so that it corresponds with the graph in section
# 2.4.2; it did not work with the sigma=2 mentioned there.
def p_xc(x, C):
"""Get pxc(x, C=C)"""
mu_C = mu[C-1]
sigma_C = sigma[C-1]
p_x_c = stats.norm(loc=mu_C, scale=sigma_C) # px|c(x|C=C)
return p_x_c.pdf(x) * P[C-1]
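

# Hedged usage sketch (not part of the original script): evaluate the joint
# densities at a single point and form the posterior P(C=1|x). The value of
# x below is arbitrary and only for illustration.
def _example_posterior(x=5.0):
    joint_1 = p_xc(x, 1)                   # pxc(x, C=1) = px|c(x|C=1) * P(C=1)
    joint_2 = p_xc(x, 2)                   # pxc(x, C=2) = px|c(x|C=2) * P(C=2)
    return joint_1 / (joint_1 + joint_2)   # P(C=1|x)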
def main():
n = 100
X = np.linspace(-4, 15, n)
p_xc1 = p_xc(X, 1)
p_xc2 = p_xc(X, 2)
P_cx = np.zeros((n, ))
for i, y1, y2 in zip(count(), p_xc1, p_xc2):
px = y1 + y2 # px(x)
P_cx[i] = y1 / px # P(C=1|x)
pl.plot(X, p_xc1, 'b')
pl.plot(X, p_xc2, 'r')
pl.plot(X, P_cx, 'b--')
pl.plot(X, 1 - P_cx, 'r--')
with open('minerr_1.tex', 'w') as f:
plt = Plt(position='H')
plt.add_plot(pl)
plt.add_caption('Minimum Error Classification I')
plt.dump(f)
if __name__ == '__main__':
main()
| mit |
ndchorley/scipy | tools/refguide_check.py | 8 | 13897 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import shutil
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser, REMAINDER
import numpy as np
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'linalg',
'cluster',
'cluster.vq',
'cluster.hierarchy',
'fftpack',
'interpolate',
'integrate',
'io',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'spatial',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'stats',
'stats.mstats',
]
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.integrate.quad',
'scipy.interpolate.UnivariateSpline',
'scipy.stats.levy_stable'
])
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_funcnames(module):
funcnames = set()
# 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
pattern = re.compile("^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$")
for line in module.__doc__.splitlines():
res = re.search(pattern, line)
if res is not None:
funcname = res.groups()[0]
funcnames.add(funcname)
return funcnames
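

# Hedged illustration (not part of the original script): the pattern above is
# meant to match refguide lines of the form "   name  -- description". The
# sample line below is made up purely for demonstration.
def _example_funcname_match():
    sample = "   lstsq        -- Least-squares solution to a linear system"
    return re.search("^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$", sample).groups()[0]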
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("__")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
return not_deprecated, deprecated
def compare(all_dict, funcnames):
"""Return sets of objects only in one of __all__, refguide."""
only_all = set()
for name in all_dict:
if name not in funcnames:
only_all.add(name)
only_ref = set()
for name in funcnames:
if name not in all_dict:
only_ref.add(name)
return only_all, only_ref
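

# Hedged illustration (not part of the original script): with toy inputs,
# compare() returns the names present in only one of the two collections.
def _example_compare():
    toy_all = ['solve', 'inv', 'det']        # pretend __all__ contents
    toy_ref = set(['solve', 'eig'])          # pretend refguide listing
    only_all, only_ref = compare(toy_all, toy_ref)
    return only_all, only_ref                # ({'inv', 'det'}, {'eig'})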
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except:
pass
return False
def report(all_dict, funcnames, deprecated, module_name):
"""Print out a report for the module"""
print("\n\n" + "=" * len(module_name))
print(module_name)
print("=" * len(module_name) + "\n")
num_all = len(all_dict)
num_ref = len(funcnames)
print("Non-deprecated objects in __all__: %i" % num_all)
print("Objects in refguide: %i" % num_ref)
only_all, only_ref = compare(all_dict, funcnames)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(only_all) == len(only_ref) == 0:
print("\nNo missing or extraneous items!")
else:
if len(only_all) > 0:
print("")
print("Objects in %s.__all__ but not in refguide::\n" % module_name)
for name in only_all:
print(" " + name)
if len(only_ref) > 0:
print("")
print("Objects in refguide but not in %s.__all__::\n" % module_name)
for name in only_ref:
print(" " + name)
if len(dep_in_ref) > 0:
print("")
print("Deprecated objects in refguide::\n")
for name in deprecated:
print(" " + name)
def check_docstrings(module, verbose):
"""Check code in docstrings of the module's public symbols.
"""
# the namespace to run examples in
ns = {'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
# if MPL is available, use display-less backend
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
def format_item_header(name):
return "\n\n" + name + "\n" + "-" * len(name)
class DTRunner(doctest.DocTestRunner):
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim'}
rndm_markers = {'# random', '# Random', '#random', '#Random'}
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
out(format_item_header(self._item_name))
if new_line:
out("\n")
self._item_name = None
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
if (any(word in example.source for word in self.stopwords) or
any(word in example.want for word in self.rndm_markers)):
# do not complain if output does not match
pass
else:
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
def __init__(self, parse_namedtuples=True, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(ns))
a_got = eval(got, dict(ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = ('[\w\d_]+\(' +
', '.join(['[\w\d_]+=(.+)']*num) +
'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except TypeError:
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogenous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
# Loop over non-deprecated items
all_success = True
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
print(format_item_header(full_name))
print("Missing item!")
print(traceback.format_exc())
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
print(format_item_header(full_name))
print("Failed to get doctests:")
print(traceback.format_exc())
continue
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t)
if fails > 0:
all_success = False
if have_matplotlib:
plt.close('all')
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
if not verbose and all_success:
# Print at least a success message if no other output was produced
print("\nAll doctests pass!")
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=list(PUBLIC_SUBMODULES),
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("--verbose", action="store_true")
args = parser.parse_args(argv)
for submodule_name in args.module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
funcnames = find_funcnames(module)
all_dict, deprecated = get_all_dict(module)
report(all_dict, funcnames, deprecated, module_name)
if args.doctests:
check_docstrings(module, args.verbose)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
hrichstein/Stellar_mass_env_Density | Codes/Scripts/plot_all_mocks.py | 1 | 80737 | from __future__ import division, absolute_import
import astropy.stats
import glob
import math
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import os
import pandas as pd
from scipy import integrate,optimize,spatial
###############################################################################
###############################################################################
###############################################################################
__author__ =['Victor Calderon']
__copyright__ =["Copyright 2016 Victor Calderon, Index function"]
__email__ =['[email protected]']
__maintainer__ =['Victor Calderon']
def Index(directory, datatype):
"""
Indexes the files in a directory `directory' with a
specific data type.
Parameters
----------
directory: str
Absolute path to the folder that is indexed.
datatype: str
Data type of the files to be indexed in the folder.
Returns
-------
file_array: array_like
np.array of indexed files in the folder 'directory'
with specific datatype.
Examples
--------
>>> Index('~/data', '.txt')
>>> array(['A.txt', 'Z'.txt', ...])
"""
assert(os.path.exists(directory))
files = np.array(glob.glob('{0}/*{1}'.format(directory, datatype)))
return files
###############################################################################
def myceil(x, base=10):
"""
Returns the upper-bound integer of 'x' in base 'base'.
Parameters
----------
x: float
number to be approximated to closest number to 'base'
base: float
base used to calculate the closest 'largest' number
Returns
-------
n_high: float
Closest float number to 'x', i.e. upper-bound float.
Example
-------
>>>> myceil(12,10)
20
>>>>
>>>> myceil(12.05, 0.1)
12.10000
"""
n_high = float(base*math.ceil(float(x)/base))
return n_high
###############################################################################
def myfloor(x, base=10):
"""
Returns the lower-bound integer of 'x' in base 'base'
Parameters
----------
x: float
number to be approximated to closest number of 'base'
base: float
base used to calculate the closest 'smallest' number
Returns
-------
n_low: float
Closest float number to 'x', i.e. lower-bound float.
Example
-------
>>>> myfloor(12, 5)
>>>> 10
"""
n_low = float(base*math.floor(float(x)/base))
return n_low
###############################################################################
def Bins_array_create(arr, base=10):
"""
Generates array between [arr.min(), arr.max()] in steps of `base`.
Parameters
----------
arr: array_like, Shape (N,...), One-dimensional
Array of numerical elements
base: float, optional (default=10)
Interval between bins
Returns
-------
bins_arr: array_like
Array of bin edges for given arr
"""
base = float(base)
arr = np.array(arr)
assert(arr.ndim==1)
arr_min = myfloor(arr.min(), base=base)
arr_max = myceil( arr.max(), base=base)
bins_arr = np.arange(arr_min, arr_max+0.5*base, base)
return bins_arr
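

# Hedged usage sketch (illustrative only): bin edges in steps of 0.2 dex for a
# toy mass array; the numbers below are made up.
def _example_bins():
    toy_mass = np.array([9.13, 9.87, 10.42, 11.05])
    return Bins_array_create(toy_mass, base=0.2)   # edges from 9.0 to 11.2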
###############################################################################
###############################################################################
###############################################################################
def sph_to_cart(ra,dec,cz):
"""
Converts spherical coordinates to Cartesian coordinates.
Parameters
----------
ra: array-like
right-ascension of galaxies in degrees
dec: array-like
declination of galaxies in degrees
cz: array-like
velocity of galaxies in km/s
Returns
-------
coords: array-like, shape = N by 3
x, y, and z coordinates
"""
cz_dist = cz/70. #converts velocity into distance
x_arr = cz_dist*np.cos(np.radians(ra))*np.cos(np.radians(dec))
y_arr = cz_dist*np.sin(np.radians(ra))*np.cos(np.radians(dec))
z_arr = cz_dist*np.sin(np.radians(dec))
coords = np.column_stack((x_arr,y_arr,z_arr))
return coords
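

# Hedged usage sketch (illustrative only): convert a single toy galaxy at
# ra=180 deg, dec=0 deg, cz=7000 km/s (distance cz/70 = 100 Mpc, as assumed above).
def _example_sph_to_cart():
    return sph_to_cart(np.array([180.]), np.array([0.]), np.array([7000.]))
    # -> approximately [[-100., 0., 0.]]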
############################################################################
def calc_dens(n_val,r_val):
"""
Returns densities of spheres with radius being the distance to the
nth nearest neighbor.
Parameters
----------
n_val = integer
The 'N' from Nth nearest neighbor
r_val = array-like
An array with the distances to the Nth nearest neighbor for
each galaxy
Returns
-------
dens: array-like
An array with the densities of the spheres created with radii
to the Nth nearest neighbor.
"""
dens = np.array([(3.*(n_val+1)/(4.*np.pi*r_val[hh]**3)) \
for hh in range(len(r_val))])
return dens
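

# Hedged usage sketch (illustrative only): densities of spheres reaching to the
# 3rd nearest neighbor for three toy distances (in Mpc).
def _example_calc_dens():
    toy_r = np.array([1.0, 2.5, 5.0])
    return calc_dens(3, toy_r)   # (3*(3+1))/(4*pi*r^3) galaxies per Mpc^3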
###############################################################################
def plot_calcs(mass,bins,dlogM):
"""
Returns values for plotting the stellar mass function and
mass ratios
Parameters
----------
mass: array-like
A 1D array with mass values, assumed to be in order
    bins: array-like
A 1D array with the values which will be used as the bin edges
by the histogram function
dlogM: float-like
The log difference between bin edges
Returns
-------
    bin_centers_fin: list of arrays
        The median mass values of the non-empty mass bins, one array per
        percentile cut
    mass_freq: array-like
        Contains the number density values of each mass bin
ratio_dict: dictionary-like
A dictionary with three keys, corresponding to the divisors
2,4, and 10 (as the percentile cuts are based on these
divisions). Each key has the density-cut, mass ratios for
that specific cut (50/50 for 2; 25/75 for 4; 10/90 for 10).
"""
mass_counts, edges = np.histogram(mass,bins)
bin_centers = 0.5*(edges[:-1]+edges[1:])
mass_freq = mass_counts/float(len(mass))/dlogM
# non_zero = (mass_freq!=0)
ratio_dict = {}
frac_val = [2,4,10]
yerr = []
bin_centers_fin = []
for ii in frac_val:
ratio_dict[ii] = {}
frac_data = int(len(mass)/ii)
# Calculations for the lower density cut
frac_mass = mass[0:frac_data]
counts, edges = np.histogram(frac_mass,bins)
# Calculations for the higher density cut
frac_mass_2 = mass[-frac_data:]
counts_2, edges_2 = np.histogram(frac_mass_2,bins)
# Ratio determination
ratio_counts = (1.*counts_2)/(1.*counts)
non_zero = np.isfinite(ratio_counts)
ratio_counts_1 = ratio_counts[non_zero]
# print 'len ratio_counts: {0}'.format(len(ratio_counts_1))
ratio_dict[ii] = ratio_counts_1
temp_yerr = (counts_2*1.)/(counts*1.)*\
np.sqrt(1./counts + 1./counts_2)
temp_yerr_1 = temp_yerr[non_zero]
# print 'len yerr: {0}'.format(len(temp_yerr_1))
yerr.append(temp_yerr_1)
bin_centers_1 = bin_centers[non_zero]
# print 'len bin_cens: {0}'.format(len(bin_centers_1))
bin_centers_fin.append(bin_centers_1)
mass_freq_list = [[] for xx in xrange(2)]
mass_freq_list[0] = mass_freq
mass_freq_list[1] = np.sqrt(mass_counts)/float(len(mass))/dlogM
mass_freq = np.array(mass_freq_list)
ratio_dict_list = [[] for xx in range(2)]
ratio_dict_list[0] = ratio_dict
ratio_dict_list[1] = yerr
ratio_dict = ratio_dict_list
return bin_centers_fin, mass_freq, ratio_dict
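

# Hedged usage sketch (illustrative only): plot_calcs expects masses already
# sorted by ascending local density; random toy masses stand in for that here.
def _example_plot_calcs():
    toy_mass = np.random.uniform(9.1, 11.7, 1000)   # pretend density-sorted
    toy_bins = np.arange(9.1, 11.9, 0.2)
    centers, freq, ratios = plot_calcs(toy_mass, toy_bins, 0.2)
    return freq[0]   # the stellar mass function of the toy sample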
###############################################################################
def bin_func(mass_dist,bins,kk,bootstrap=False):
"""
Returns median distance to Nth nearest neighbor
Parameters
----------
mass_dist: array-like
        An array with mass values at index 0 (when transposed) and the
        distances to the Nth nearest neighbors in the other columns
        Example: 6239 by 7
            Has mass values and distances to 6 Nth nearest neighbors
    bins: array-like
        A 1D array with the values which will be used as the bin edges
    kk: integer-like
        The index of mass_dist (transposed) where the appropriate distance
        array may be found
Optional
--------
bootstrap == True
Calculates the bootstrap errors associated with each median distance
value. Creates an array housing arrays containing the actual distance
values associated with every galaxy in a specific bin. Bootstrap error
is then performed using astropy, and upper and lower one sigma values
are found for each median value. These are added to a list with the
median distances, and then converted to an array and returned in place
of just 'medians.'
Returns
-------
medians: array-like
An array with the median distance to the Nth nearest neighbor from
all the galaxies in each of the bins
"""
frac_vals = np.array([2,4,10])
edges = bins
# print 'length bins:'
# print len(bins)
digitized = np.digitize(mass_dist.T[0],edges)
digitized -= int(1)
bin_nums = np.unique(digitized)
# bin_nums_list = list(bin_nums)
# # if (len(bins)) in bin_nums_list:
# # bin_nums_list.remove(len(bins))
# # print 'removed'
# # print bin_nums_list
# bin_nums = np.array(bin_nums_list)
for ii in bin_nums:
if len(mass_dist.T[kk][digitized==ii]) == 0:
temp_list = list(mass_dist.T[kk]\
[digitized==ii])
temp_list.append(np.zeros(len(bin_nums)))
mass_dist.T[kk][digitized==ii] = np.array(temp_list)
# print bin_nums
# print len(bin_nums)
medians = np.array([np.median(mass_dist.T[kk][digitized==ii]) \
for ii in bin_nums])
# print len(medians)
if bootstrap == True:
dist_in_bin = np.array([(mass_dist.T[kk][digitized==ii]) \
for ii in bin_nums])
for vv in range(len(dist_in_bin)):
if len(dist_in_bin[vv]) == 0:
dist_in_bin_list = list(dist_in_bin[vv])
dist_in_bin[vv] = np.zeros(len(dist_in_bin[0]))
low_err_test = np.array([np.percentile(astropy.stats.bootstrap\
(dist_in_bin[vv],bootnum=1000,bootfunc=np.median),16) \
for vv in range(len(dist_in_bin))])
high_err_test = np.array([np.percentile(astropy.stats.bootstrap\
(dist_in_bin[vv],bootnum=1000,bootfunc=np.median),84) \
for vv in range(len(dist_in_bin))])
med_list = [[] for yy in range(len(frac_vals))]
med_list[0] = medians
med_list[1] = low_err_test
med_list[2] = high_err_test
medians = np.array(med_list)
return medians
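

# Hedged usage sketch (illustrative only): median distance to the 1st nearest
# neighbor (column 1) per mass bin, for a toy mass/distance array.
def _example_bin_func():
    toy_mass = np.random.uniform(9.1, 11.7, 500)
    toy_dist = np.random.uniform(0.5, 10.0, 500)     # toy 1st-NN distances (Mpc)
    toy_mass_dist = np.column_stack((toy_mass, toy_dist))
    toy_bins = np.arange(9.1, 11.9, 0.2)
    return bin_func(toy_mass_dist, toy_bins, 1)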
###############################################################################
def hist_calcs(mass,bins,dlogM):
"""
Returns dictionaries with the counts for the upper
and lower density portions; calculates the
three different percentile cuts for each mass
array given
Parameters
----------
mass: array-like
        A 1D array with log stellar mass values, assumed to be in an order
        which corresponds to ascending densities (necessary, as the index
        cuts are based on this)
bins: array-like
A 1D array with the values which will be used as the bin edges
dlogM: float-like
The log difference between bin edges
Returns
-------
hist_dict_low: dictionary-like
A dictionary with three keys (the frac vals), with arrays
as values. The values for the lower density cut
    hist_dict_high: dictionary-like
A dictionary with three keys (the frac vals), with arrays
as values. The values for the higher density cut
"""
hist_dict_low = {}
hist_dict_high = {}
frac_val = np.array([2,4,10])
frac_dict = {2:0,4:1,10:2}
low_err = [[] for xx in xrange(len(frac_val))]
high_err = [[] for xx in xrange(len(frac_val))]
for ii in frac_val:
# hist_dict_low[ii] = {}
# hist_dict_high[ii] = {}
frac_data = int(len(mass)/ii)
frac_mass = mass[0:frac_data]
counts, edges = np.histogram(frac_mass,bins)
low_counts = (counts/float(len(frac_mass))/dlogM)
non_zero = (low_counts!=0)
low_counts_1 = low_counts[non_zero]
hist_dict_low[ii] = low_counts_1
low_err = np.sqrt(counts)/len(frac_mass)/dlogM
low_err_1 = low_err[non_zero]
err_key = 'err_{0}'.format(ii)
hist_dict_low[err_key] = low_err_1
frac_mass_2 = mass[-frac_data:]
counts_2, edges_2 = np.histogram(frac_mass_2,bins)
high_counts = (counts_2/float(len(frac_mass_2))/dlogM)
non_zero = (high_counts!=0)
high_counts_1 = high_counts[non_zero]
hist_dict_high[ii] = high_counts_1
high_err = np.sqrt(counts_2)/len(frac_mass_2)/dlogM
high_err_1 = high_err[non_zero]
hist_dict_high[err_key] = high_err_1
return hist_dict_low, hist_dict_high
###############################################################################
def mean_bin_mass(mass_dist,bins):
"""
    Returns the mean log stellar mass of the galaxies in each mass bin
    Parameters
    ----------
    mass_dist: array-like
        An array with mass values at index 0 (when transposed)
    bins: array-like
        A 1D array with the values which will be used as the bin edges
    Returns
    -------
    mean_mass: array-like
        An array with the mean log stellar mass value of each bin
"""
edges = bins
digitized = np.digitize(mass_dist.T[0],edges)
digitized -= int(1)
bin_nums = np.unique(digitized)
mean_mass = np.array([np.mean(mass_dist.T[0][digitized==ii]) \
for ii in bin_nums])
return mean_mass
###############################################################################
def plot_all_rats(bin_centers,y_vals,neigh_val,ax,col_num,plot_idx):
"""
Returns a plot showing the density-cut, mass ratio. Optimally
used with a well-initiated for-loop
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
y_vals: array-like
An array containing the ratio values for each mass bin
neigh_val: integer-like
Value which will be inserted into the text label of each plot
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
col_num: integer-like
Integer which specifies which column is currently being
plotted. Used for labelling subplots
plot_idx: integer-like
Specifies which subplot the figure is plotted to. Used for
labeling the x-axis
Returns
-------
Figure with three subplots showing appropriate ratios
"""
if plot_idx ==16:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
if col_num ==0:
title_label = 'Mass Ratio 50/50, {0} NN'.format(neigh_val)
frac_val = 10
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num ==1:
title_label = 'Mass Ratio 25/75, {0} NN'.format(neigh_val)
frac_val = 4
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num ==2:
title_label = 'Mass Ratio 10/90, {0} NN'.format(neigh_val)
frac_val = 2
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
ax.set_xlim(9.1,11.9)
ax.set_ylim([0,5])
ax.set_xticks(np.arange(9.5, 12., 0.5))
ax.set_yticks([1.,3.])
ax.tick_params(axis='both', labelsize=12)
ax.axhline(y=1,c="darkorchid",linewidth=0.5,zorder=0)
ax.plot(bin_centers,y_vals,color='silver')
###############################################################################
def plot_eco_rats(bin_centers,y_vals,neigh_val,ax,col_num,plot_idx,only=False):
"""
Returns subplots of ECO density-cut,mass ratios
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
y_vals: array-like
An array containing the ratio values for each mass bin
neigh_val: integer-like
Value which will be inserted into the text label of each plot
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
col_num: integer-like
Integer which specifies which column is currently being
plotted. Used for labelling subplots
plot_idx: integer-like
Specifies which subplot the figure is plotted to. Used for
labeling the x-axis
Optional
--------
only == True
To be used when only plotting the ECO ratios, no mocks.
Will add in the additional plotting specifications that
would have been taken care of previously in a for-loop
which plotted the mocks as well
Returns
-------
ECO ratios plotted to any previously initialized figure
"""
if only == True:
if plot_idx ==16:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
if col_num ==0:
title_label = 'Mass Ratio 50/50, {0} NN'.format(neigh_val)
frac_val = 10
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num ==1:
title_label = 'Mass Ratio 25/75, {0} NN'.format(neigh_val)
frac_val = 4
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num ==2:
title_label = 'Mass Ratio 10/90, {0} NN'.format(neigh_val)
frac_val = 2
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
ax.set_xlim(9.1,11.9)
ax.set_ylim([0,5])
ax.set_xticks(np.arange(9.5, 12., 0.5))
ax.set_yticks([1.,3.])
ax.tick_params(axis='both', labelsize=12)
ax.axhline(y=1,c="darkorchid",linewidth=0.5,zorder=0)
frac_vals = np.array([2,4,10])
    # Use the column index passed in (col_num) rather than relying on a
    # global loop variable to pick the percentile cut
    y_vals_2 = y_vals[0][frac_vals[col_num]]
    ax.errorbar(bin_centers,y_vals_2,yerr=y_vals[1][col_num],\
        color='dodgerblue',linewidth=2)
###############################################################################
def plot_hists(mass,neigh_val,bins,dlogM,ax,col_num,plot_idx):
"""
Returns a plot showing the density-cut, mass counts.
Parameters
----------
mass: array-like
A 1D array with log stellar mass values
neigh_val: integer-like
Value which will be inserted into the text label of each plot
bins: array-like
A 1D array with the values which will be used as the bin edges
dlogM: float-like
The log difference between bin edges
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
col_num: integer-like
Integer which specifies which column is currently being
plotted. Used for labelling subplots
plot_idx: integer-like
Specifies which subplot the figure is plotted to. Used for
labeling the x-axis
Returns
-------
Figure with two curves, optionally (if uncommented) plotted in step
"""
ax.set_yscale('log')
if col_num==0:
title_label = 'Mass 50/50, {0} NN'.format(neigh_val)
frac_val = 2
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num==1:
title_label = 'Mass 25/75, {0} NN'.format(neigh_val)
frac_val = 4
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num==2:
title_label = 'Mass 10/90, {0} NN'.format(neigh_val)
frac_val = 10
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
if plot_idx == 16:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
ax.set_xlim(9.1,11.9)
ax.set_ylim([10**-3,10**1])
ax.set_xticks(np.arange(9.5, 12., 0.5))
ax.set_yticks([10**-2,10**0])
frac_data = (len(mass)/frac_val)
frac_mass = mass[0:frac_data]
counts, edges = np.histogram(frac_mass,bins)
low_counts = (counts/float(len(frac_mass))/dlogM)
bins_cens = .5*(edges[:-1]+edges[1:])
# ax.step(bins_cens, low_counts, color='lightslategrey',where='mid',\
# alpha=0.1)
ax.plot(bins_cens, low_counts, color='lightslategrey',alpha=0.1)
frac_mass_2 = mass[-frac_data:]
counts_2, edges_2 = np.histogram(frac_mass_2,bins)
high_counts = (counts_2/float(len(frac_mass_2))/dlogM)
# ax.step(bins_cens, high_counts, color='lightslategray',where='mid',\
# alpha=0.1)
ax.plot(bins_cens, high_counts, color='lightslategray',alpha=0.1)
# res = np.array([low_counts,high_counts])
# return res
###############################################################################
def plot_eco_hists(mass,bins,dlogM,ax,col,plot_idx):
if col==0:
frac_val = 2
elif col==1:
frac_val = 4
elif col==2:
frac_val = 10
frac_data = (len(mass)/frac_val)
frac_mass = mass[0:frac_data]
counts, edges = np.histogram(frac_mass,bins)
bins_cens = .5*(edges[:-1]+edges[1:])
ax.step(bins_cens, (counts/float(len(frac_mass))/dlogM), color='lime',\
where='mid',label='Lower')
frac_mass_2 = mass[-frac_data:]
counts_2, edges_2 = np.histogram(frac_mass_2,bins)
ax.step(bins_cens, (counts_2/float(len(frac_mass_2))/dlogM), \
color='dodgerblue',where='mid',label='Higher')
if plot_idx == 0:
ax.legend(loc='best')
###############################################################################
def plot_all_meds(bin_centers,y_vals,ax,plot_idx):
"""
Returns six subplots showing the median distance to
the Nth nearest neighbor for each mass bin. Assumes a
previously defined figure. Best used in a for-loop
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
y_vals: array-like
An array containing the median distance values for each mass bin
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
plot_idx: integer-like
Specifies which subplot the figure is plotted to. Used for
the text label in each subplot
Returns
-------
Subplots displaying the median distance to Nth nearest neighbor
trends for each mass bin
"""
titles = [1,2,3,5,10,20]
ax.set_ylim(0,10**1.5)
ax.set_xlim(9.1,11.9)
ax.set_yscale('symlog')
ax.set_xticks(np.arange(9.5,12.,0.5))
ax.set_yticks(np.arange(0,12,1))
ax.set_yticklabels(np.arange(1,11,2))
ax.tick_params(axis='x', which='major', labelsize=16)
title_here = 'n = {0}'.format(titles[plot_idx])
ax.text(0.05, 0.95, title_here,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=18)
if plot_idx == 4:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=20)
ax.plot(bin_centers,y_vals,color='silver')
#############################################################################
def plot_eco_meds(bin_centers,y_vals,low_lim,up_lim,ax,plot_idx,only=False):
"""
Returns six subplots showing the median Nth nearest neighbor distance for
ECO galaxies in each mass bin
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
y_vals: array-like
An array containing the median distance values for each mass bin
low_lim: array-like
An array with the lower cut-off of the bootstrap errors for each median
up_lim: array-like
An array with the upper cut-off of the bootstrap errors for each median
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
plot_idx: integer-like
Specifies which subplot the figure is plotted to. Used for
the text label in each subplot
Optional
--------
    only == True
To be used when only plotting the ECO median trends,
no mocks. Will add in the additional plotting
specifications that would have been taken care of
previously in a for-loop which plotted the mocks as well
Returns
-------
Subplots displaying the median distance to Nth nearest neighbor
trends for each mass bin, with the bootstrap errors
"""
if only == True:
titles = [1,2,3,5,10,20]
ax.set_ylim(0,10**1.5)
ax.set_xlim(9.1,11.9)
ax.set_yscale('symlog')
ax.set_xticks(np.arange(9.5,12.,0.5))
ax.tick_params(axis='both', which='major', labelsize=16)
title_here = 'n = {0}'.format(titles[plot_idx])
ax.text(0.05, 0.95, title_here,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=18)
if plot_idx == 4:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
ytop = np.array(up_lim-y_vals)
ybot = np.array(y_vals-low_lim)
ax.errorbar(bin_centers,y_vals,yerr=(ybot,ytop),color='darkmagenta',label='ECO')
# if plot_idx == 5:
# ax.legend(loc='best')
###############################################################################
def plot_bands(bin_centers,upper,lower,ax):
"""
Returns an overlayed, fill-between plot, creating a band
between the different mock catalog values plotted
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
upper: array-like
Array with the max y-values among all the mocks
for each mass bin
lower: array-like
Array with the min y-values among all the mocks
for each mass bin
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
Returns
-------
A semi-transparent band overlaying the area of the plot
        bordered by the mocks
"""
ax.fill_between(bin_centers,upper,lower,color='silver',alpha=0.1)
###############################################################################
def plot_med_range(bin_centers,low_lim,up_lim,ax,alpha,color='gray'):
"""
Returns a plot with a transparent band highlighting a range of
values.
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
low_lim: array-like
Array with the min y-values among all the mocks
for each mass bin
up_lim: array-like
Array with the max y-values among all the mocks
for each mass bin
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
alpha: float-like
A value which will determine the tranparency of the band
color: str
Any color which Python recognizes; sets the color of the band
Returns
-------
A band spanning from the max y-values to the minimum.
"""
ax.fill_between(bin_centers,low_lim,up_lim,color=color,alpha=alpha)
##############################################################################
##############################################################################
##############################################################################
dirpath = r"C:\Users\Hannah\Desktop\Vanderbilt_REU\Stellar_mass_env_density"
dirpath += r"\Catalogs\Beta_M1_Behroozi"
dirpath += r"\ab_matching"
dirpath += r"\Resolve_plk_5001_so_mvir_hod1_scatter0p2_mock1_ECO_Mocks"
usecols = (0,1,2,4,13)
dlogM = 0.2
##############################################################################
##############################################################################
##############################################################################
ECO_cats = (Index(dirpath,'.dat'))
names = ['ra','dec','cz','Halo_ID','logMstar']
PD = [(pd.read_csv(ECO_cats[ii],sep="\s+", usecols= usecols,header=None,\
skiprows=2,names=names)) for ii in range(len(ECO_cats))]
PD_comp = [(PD[ii][PD[ii].logMstar >= 9.1]) for ii in range(len(ECO_cats))]
PD = [[] for ii in range(len(ECO_cats))]
for ii in range(len(ECO_cats)):
temp_PD = (pd.read_csv(ECO_cats[ii],sep="\s+", usecols= usecols,\
header=None,skiprows=2,names=names))
PD[ii] = temp_PD
PD_comp_1 = [(PD[ii][PD[ii].logMstar >= 9.1]) for ii in range(len(ECO_cats))]
PD_comp = [(PD_comp_1[ii][PD_comp_1[ii].logMstar <=11.77]) \
for ii in range(len(ECO_cats))]
[(PD_comp[ii].reset_index(drop=True,inplace=True)) \
for ii in range(len(ECO_cats))]
min_max_mass_arr = []
for ii in range(len(PD_comp)):
min_max_mass_arr.append(max(PD_comp[ii].logMstar))
min_max_mass_arr.append(min(PD_comp[ii].logMstar))
min_max_mass_arr = np.array(min_max_mass_arr)
bins = Bins_array_create(min_max_mass_arr,dlogM)
bins+= 0.1
bins_list = list(bins)
for ii in bins:
if ii > 11.77:
bins_list.remove(ii)
bins = np.array(bins_list)
num_of_bins = int(len(bins) - 1)
ra_arr = np.array([(PD_comp[ii].ra) \
for ii in range(len(PD_comp))])
dec_arr = np.array([(PD_comp[ii].dec) \
for ii in range(len(PD_comp))])
cz_arr = np.array([(PD_comp[ii].cz) \
for ii in range(len(PD_comp))])
mass_arr = np.array([(PD_comp[ii].logMstar) \
for ii in range(len(PD_comp))])
halo_id_arr = np.array([(PD_comp[ii].Halo_ID) \
for ii in range(len(PD_comp))])
coords_test = np.array([sph_to_cart(ra_arr[vv],dec_arr[vv],cz_arr[vv]) \
for vv in range(len(ECO_cats))])
neigh_vals = np.array([1,2,3,5,10,20])
nn_arr_temp = [[] for uu in xrange(len(coords_test))]
nn_arr = [[] for xx in xrange(len(coords_test))]
nn_arr_nn = [[] for yy in xrange(len(neigh_vals))]
nn_idx = [[] for zz in xrange(len(coords_test))]
for vv in range(len(coords_test)):
nn_arr_temp[vv] = spatial.cKDTree(coords_test[vv])
nn_arr[vv] = np.array(nn_arr_temp[vv].query(coords_test[vv],21)[0])
nn_idx[vv] = np.array(nn_arr_temp[vv].query(coords_test[vv],21)[1])
nn_specs = [(np.array(nn_arr).T[ii].T[neigh_vals].T) for ii in \
range(len(coords_test))]
nn_mass_dist = np.array([(np.column_stack((mass_arr[qq],nn_specs[qq]))) \
for qq in range(len(coords_test))])
nn_neigh_idx = np.array([(np.array(nn_idx).T[ii].T[neigh_vals].T) for ii in \
range(len(coords_test))])
###############################################################################
sat_cols = (13,25)
sat_names = ['logMstar','cent_sat_flag']
SF_PD = [(pd.read_csv(ECO_cats[ii],sep="\s+", usecols= sat_cols,\
header=None,
skiprows=2,names=sat_names)) for ii in range(8)]
SF_PD_comp = [(SF_PD[ii][SF_PD[ii].logMstar >= 9.1]) for ii in \
range(len(ECO_cats))]
sats_num = np.array([(len(SF_PD_comp[ii][SF_PD_comp[ii].cent_sat_flag==0])) \
for ii in range(len(SF_PD_comp))])
cents_num = np.array([(len(SF_PD_comp[ii][SF_PD_comp[ii].cent_sat_flag==1])) \
for ii in range(len(SF_PD_comp))])
gal_tot = np.array([(len(SF_PD_comp[ii])) for ii in range(len(SF_PD_comp))])
print 'SAT_FRAC = {0}'.format(sats_num/gal_tot)
###############################################################################
nn_dist = {}
nn_dens = {}
mass_dat = {}
ratio_info = {}
mass_freq = [[] for xx in xrange(len(coords_test))]
bin_non_zero = [[] for xx in xrange(len(coords_test))]
for ii in range(len(coords_test)):
nn_dist[ii] = {}
nn_dens[ii] = {}
mass_dat[ii] = {}
ratio_info[ii] = {}
nn_dist[ii]['mass'] = nn_mass_dist[ii].T[0]
for jj in range(len(neigh_vals)):
nn_dist[ii][(neigh_vals[jj])] = np.array(nn_mass_dist[ii].T\
[range(1,len(neigh_vals)+1)[jj]])
nn_dens[ii][(neigh_vals[jj])] = np.column_stack((nn_mass_dist[ii].T\
[0],calc_dens(neigh_vals[jj],\
nn_mass_dist[ii].T[range(1,len\
(neigh_vals)+1)[jj]])))
idx = np.array([nn_dens[ii][neigh_vals[jj]].T[1].argsort()])
mass_dat[ii][(neigh_vals[jj])] = (nn_dens[ii][neigh_vals[jj]]\
[idx].T[0])
bin_non_zero[ii], mass_freq[ii], ratio_info[ii][neigh_vals[jj]] = \
plot_calcs(mass_dat[ii][neigh_vals[jj]],bins,dlogM)
# for ii in range(len(bin_non_zero)):
# print len(bin_non_zero[ii])
# for jj in range(len(mass_freq[ii])):
# print len(mass_freq[ii][jj])
all_mock_meds = [[] for xx in range(len(nn_mass_dist))]
all_mock_mass_means = [[] for xx in range(len(nn_mass_dist))]
for vv in range(len(nn_mass_dist)):
all_mock_meds[vv] = [(bin_func(nn_mass_dist[vv],bins,(jj+1))) \
for jj in range(len(nn_mass_dist[vv].T)-1)]
all_mock_mass_means[vv] = (mean_bin_mass(nn_mass_dist[vv],bins))
# for ii in range(len(all_mock_meds)):
# for jj in range(len(all_mock_meds[ii])):
# print len(all_mock_meds[ii][jj][0])
med_plot_arr = [([[] for yy in xrange(len(nn_mass_dist))]) \
for xx in xrange(len(neigh_vals))]
for ii in range(len(neigh_vals)):
for jj in range(len(nn_mass_dist)):
med_plot_arr[ii][jj] = all_mock_meds[jj][ii]
# for ii in range(len(neigh_vals)):
# for jj in range(len(nn_mass_dist)):
# print len(all_mock_meds[jj][ii])
mass_freq_plot = (np.array(mass_freq[0]))
max_lim = [[] for xx in range(len(mass_freq_plot.T))]
min_lim = [[] for xx in range(len(mass_freq_plot.T))]
for jj in range(len(mass_freq_plot.T)):
max_lim[jj] = max(mass_freq_plot.T[jj])
min_lim[jj] = min(mass_freq_plot.T[jj])
###############################################################################
# ordered_mass = nn_mass_dist[0].T[0][(nn_mass_dist[0].T[0].argsort())]
# dist_cont = [[[[] for zz in xrange(len(bins)-1)] for yy in \
# xrange(len(nn_mass_dist))] for xx in \
# xrange(1,len(nn_mass_dist[0].T))]
# for ii in xrange(len(nn_mass_dist)):
# sorting_test = np.digitize(nn_mass_dist[ii].T[0],bins)
# bin_nums = np.unique(sorting_test)
# bin_nums_list = list(bin_nums)
# # if 13 not in bin_nums:
# # bin_nums_list.append(13)
# # if 14 not in bin_nums:
# # bin_nums_list.append(14)
# # bin_nums = np.array(bin_nums_list)
# # if 14 in bin_nums_list:
# # bin_nums_list.remove(14)
# # bin_nums = np.array(bin_nums_list)
# for dd in range(1,num_of_bins+1):
# if dd not in bin_nums:
# bin_nums_list.append(dd)
# if len(bins) in bin_nums_list:
# bin_nums_list.remove(len(bins))
# bin_nums = np.array(bin_nums_list)
# for jj in xrange(1,len(nn_mass_dist[ii].T)):
# for hh in bin_nums:
# dist_cont[jj-1][ii][hh-1] = (nn_mass_dist[ii].T[jj]\
# [sorting_test==hh])
# if len(dist_cont[jj-1][ii][hh-1]) == 0:
# (dist_cont[jj-1][ii][hh-1]) = list(dist_cont[jj-1][ii][hh-1])
# (dist_cont[jj-1][ii][hh-1]).append(np.zeros\
# (len(dist_cont[1][0][0])))
# (dist_cont[jj-1][ii][hh-1]) = np.array((dist_cont[jj-1][ii]\
# [hh-1]))
# for ii in xrange(len(nn_mass_dist)):
# for jj in xrange(1,len(nn_mass_dist[ii].T)):
# for hh in bin_nums:
# print len(dist_cont[jj-1][ii][hh-1])
###############################################################################
# top_68 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# low_68 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# top_95 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# low_95 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# med_50 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# for aa in xrange(len(neigh_vals)):
# for bb in xrange(len(nn_mass_dist)):
# for cc in xrange(len(dist_cont[aa][bb])):
# top_68[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],84)
# low_68[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],16)
# top_95[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],97.5)
# low_95[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],2.5)
# med_50[aa][bb][cc] = np.median((dist_cont[aa][bb][cc]))
# top_68 = np.array(top_68)
# low_68 = np.array(low_68)
# top_95 = np.array(top_95)
# low_95 = np.array(low_95)
# med_50 = np.array(med_50)
##not working with 1 dec scatter...
###############################################################################
frac_vals = [2,4,10]
nn_plot_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in \
xrange(len(neigh_vals))]
for ii in range(len(neigh_vals)):
for jj in range(len(nn_mass_dist)):
nn_plot_arr[ii][jj] = (ratio_info[jj][neigh_vals[ii]])
plot_frac_arr = [[[[] for yy in xrange(len(nn_mass_dist))] \
for zz in xrange(len(frac_vals))] for xx in \
xrange(len(nn_plot_arr))]
for jj in range(len(nn_mass_dist)):
for hh in range(len(frac_vals)):
for ii in range(len(neigh_vals)):
plot_frac_arr[ii][hh][jj] = nn_plot_arr[ii][jj][frac_vals[hh]]
###############################################################################
###############################################################################
###############################################################################
eco_path = r"C:\Users\Hannah\Desktop\Vanderbilt_REU\Stellar_mass_env_density"
eco_path += r"\Catalogs\ECO_true"
eco_cols = np.array([0,1,2,4])
###############################################################################
###############################################################################
###############################################################################
ECO_true = (Index(eco_path,'.txt'))
names = ['ra','dec','cz','logMstar']
PD_eco = pd.read_csv(ECO_true[0],sep="\s+", usecols=(eco_cols),header=None,\
skiprows=1,names=names)
eco_comp = PD_eco[PD_eco.logMstar >= 9.1]
ra_eco = (np.array(eco_comp)).T[0]
dec_eco = (np.array(eco_comp)).T[1]
cz_eco = (np.array(eco_comp)).T[2]
mass_eco = (np.array(eco_comp)).T[3]
coords_eco = sph_to_cart(ra_eco,dec_eco,cz_eco)
eco_neighbor_tree = spatial.cKDTree(coords_eco)
eco_tree_dist = np.array(eco_neighbor_tree.query(coords_eco,\
(neigh_vals[-1]+1))[0])
eco_mass_dist = np.column_stack((mass_eco,eco_tree_dist.T[neigh_vals].T))
## range(1,7) because there are six nearest-neighbor columns (index 0 is mass);
## jj specifies which of those six columns to use
eco_dens = ([calc_dens(neigh_vals[jj],\
(eco_mass_dist.T[range(1,7)[jj]])) for jj in range\
(len(neigh_vals))])
eco_mass_dens = [(np.column_stack((mass_eco,eco_dens[ii]))) for ii in \
range(len(neigh_vals))]
eco_idx = [(eco_mass_dens[jj].T[1].argsort()) for jj in \
range(len(neigh_vals))]
eco_mass_dat = [(eco_mass_dens[jj][eco_idx[jj]].T[0]) for jj in \
range(len(neigh_vals))]
eco_ratio_info = [[] for xx in xrange(len(eco_mass_dat))]
for qq in range(len(eco_mass_dat)):
centers_non_zero, eco_freq, eco_ratio_info[qq] = plot_calcs(eco_mass_dat[qq],\
bins,dlogM)
eco_medians = [[] for xx in xrange(len(eco_mass_dat))]
for jj in (range(len(eco_mass_dat))):
eco_medians[jj] = np.array(bin_func(eco_mass_dist,bins,(jj+1),\
bootstrap=True))
###############################################################################
###############################################################################
fig,ax = plt.subplots(figsize=(8,8))
ax.set_title('Mass Distribution',fontsize=18)
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
ax.set_ylabel(r'$\log\ (\frac{N_{gal}}{N_{total}*dlogM_{*}})$',fontsize=20)
ax.set_yscale('log')
ax.set_xlim(9.1,11.9)
ax.tick_params(axis='both', labelsize=14)
for ii in range(len(mass_freq)):
ax.plot(bin_centers,mass_freq[ii],color='silver')
ax.fill_between(bin_centers,max_lim,min_lim,color='silver',alpha=0.1)
ax.errorbar(bin_centers,eco_freq[0],yerr=eco_freq[1],color='dodgerblue',\
linewidth=2,label='ECO')
ax.legend(loc='best')
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.94,\
hspace=0.2,wspace=0.2)
plt.show()
###############################################################################
A = {}
nn_dict = {1:0,2:1,3:2,5:3,10:4,20:5}
coln_dict = {2:0,4:1,10:2}
nn_keys = np.sort(nn_dict.keys())
col_keys = np.sort(coln_dict.keys())
zz_num = len(plot_frac_arr[nn_dict[1]][coln_dict[2]])
for nn in nn_keys:
for coln in col_keys:
bin_str = '{0}_{1}'.format(nn,coln)
for cc in range(zz_num):
zz_arr = np.array(plot_frac_arr[nn_dict[nn]][coln_dict[coln]][cc])
n_elem = len(zz_arr)
if cc == 0:
zz_tot = np.zeros((n_elem,1))
zz_tot = np.insert(zz_tot,len(zz_tot.T),zz_arr,1)
zz_tot = np.array(np.delete(zz_tot,0,axis=1))
for kk in xrange(len(zz_tot)):
zz_tot[kk][zz_tot[kk] == np.inf] = np.nan
zz_tot_max = [np.nanmax(zz_tot[kk]) for kk in xrange(len(zz_tot))]
zz_tot_min = [np.nanmin(zz_tot[kk]) for kk in xrange(len(zz_tot))]
A[bin_str] = [zz_tot_max,zz_tot_min]
###############################################################################
np.seterr(divide='ignore',invalid='ignore')
nrow_num = int(6)
ncol_num = int(3)
zz = int(0)
fig, axes = plt.subplots(nrows=nrow_num, ncols=ncol_num, \
figsize=(100,200), sharex= True,sharey=True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, 'High Density Counts/Lower Density Counts', ha='center', \
va='center',rotation='vertical',fontsize=20)
# fig.suptitle("Percentile Trends", fontsize=18)
while zz <= 16:
for ii in range(len(eco_ratio_info)):
for hh in range(len(eco_ratio_info[0][1])):
for jj in range(len(nn_mass_dist)):
# upper = A['{0}_{1}'.format(neigh_vals[ii],frac_vals[hh])][0]
# lower = A['{0}_{1}'.format(neigh_vals[ii],frac_vals[hh])][1]
# plot_bands(bin_centers,upper,lower,axes_flat[zz] )
plot_all_rats(bin_centers,(plot_frac_arr[ii][hh][jj]),\
neigh_vals[ii],axes_flat[zz],hh,zz)
plot_eco_rats(bin_centers,(eco_ratio_info[ii]),neigh_vals[ii],\
axes_flat[zz],hh,zz)
zz += 1
plt.subplots_adjust(left=0.04, bottom=0.09, right=0.98, top=0.98,\
hspace=0,wspace=0)
plt.show()
###############################################################################
B = {}
yy_num = len(med_plot_arr[0])
for nn in range(len(med_plot_arr)):
for ii in range(yy_num):
med_str = '{0}'.format(nn)
yy_arr = med_plot_arr[nn][ii]
n_y_elem = len(yy_arr)
if ii == 0:
yy_tot = np.zeros((n_y_elem,1))
yy_tot = np.insert(yy_tot,len(yy_tot.T),yy_arr,1)
yy_tot = np.array(np.delete(yy_tot,0,axis=1))
yy_tot_max = [np.nanmax(yy_tot[kk]) for kk in xrange(len(yy_tot))]
yy_tot_min = [np.nanmin(yy_tot[kk]) for kk in xrange(len(yy_tot))]
B[med_str] = [yy_tot_max,yy_tot_min]
###############################################################################
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, 'Distance to Nth Neighbor (Mpc)', ha='center', \
va='center',rotation='vertical',fontsize=20)
zz = int(0)
while zz <=4:
for ii in range(len(med_plot_arr)):
for vv in range(len(nn_mass_dist)):
# lower_m = B['{0}'.format(ii)][0]
# upper_m = B['{0}'.format(ii)][1]
# plot_med_range(bin_centers,top_95[ii][vv],low_95[ii][vv],\
# axes_flat[zz],0.05,color='lightsteelblue')
# plot_med_range(bin_centers,top_68[ii][vv],low_68[ii][vv],\
# axes_flat[zz],0.15,color='gainsboro')
# plot_bands(bin_centers,upper_m,lower_m,axes_flat[zz])
plot_all_meds(bin_centers,med_plot_arr[ii][vv],axes_flat[zz],\
zz)
plot_eco_meds(bin_centers,eco_medians[ii][0],\
eco_medians[ii][1],eco_medians[ii][2],\
axes_flat[zz],zz)
zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=0.98, top=0.98,\
hspace=0,wspace=0)
plt.show()
###############################################################################
hist_low_info = {}
hist_high_info = {}
for ii in xrange(len(coords_test)):
hist_low_info[ii] = {}
hist_high_info[ii] = {}
for jj in range(len(neigh_vals)):
hist_low_info[ii][neigh_vals[jj]],hist_high_info[ii][neigh_vals[jj]] \
= hist_calcs(mass_dat[ii][neigh_vals[jj]],bins,dlogM)
frac_vals = [2,4,10]
hist_low_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in \
xrange(len(neigh_vals))]
hist_high_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in \
xrange(len(neigh_vals))]
for ii in range(len(neigh_vals)):
for jj in range(len(nn_mass_dist)):
hist_low_arr[ii][jj] = (hist_low_info[jj][neigh_vals[ii]])
hist_high_arr[ii][jj] = (hist_high_info[jj][neigh_vals[ii]])
plot_low_hist = [[[[] for yy in xrange(len(nn_mass_dist))] \
for zz in xrange(len(frac_vals))] for xx in \
xrange(len(hist_low_arr))]
plot_high_hist = [[[[] for yy in xrange(len(nn_mass_dist))] \
for zz in xrange(len(frac_vals))] for xx in \
xrange(len(hist_high_arr))]
for jj in range(len(nn_mass_dist)):
for hh in range(len(frac_vals)):
for ii in range(len(neigh_vals)):
plot_low_hist[ii][hh][jj] = hist_low_arr[ii][jj][frac_vals[hh]]
plot_high_hist[ii][hh][jj] = hist_high_arr[ii][jj][frac_vals[hh]]
###############################################################################
C = {}
D = {}
nn_dict = {1:0,2:1,3:2,5:3,10:4,20:5}
coln_dict = {2:0,4:1,10:2}
nn_keys = np.sort(nn_dict.keys())
col_keys = np.sort(coln_dict.keys())
vv_num = len(plot_low_hist[nn_dict[1]][coln_dict[2]])
for nn in nn_keys:
for coln in col_keys:
bin_str = '{0}_{1}'.format(nn,coln)
for cc in range(vv_num):
vv_arr = np.array(plot_low_hist[nn_dict[nn]][coln_dict[coln]][cc])
n_elem = len(vv_arr)
if cc == 0:
vv_tot = np.zeros((n_elem,1))
vv_tot = np.insert(vv_tot,len(vv_tot.T),vv_arr,1)
vv_tot = np.array(np.delete(vv_tot,0,axis=1))
for kk in xrange(len(vv_tot)):
vv_tot[kk][vv_tot[kk] == np.inf] = np.nan
vv_tot_max = [np.nanmax(vv_tot[kk]) for kk in xrange(len(vv_tot))]
vv_tot_min = [np.nanmin(vv_tot[kk]) for kk in xrange(len(vv_tot))]
C[bin_str] = [vv_tot_max,vv_tot_min]
hh_num = len(plot_high_hist[nn_dict[1]][coln_dict[2]])
for nn in nn_keys:
for coln in col_keys:
bin_str = '{0}_{1}'.format(nn,coln)
for cc in range(hh_num):
hh_arr = np.array(plot_high_hist[nn_dict[nn]][coln_dict[coln]][cc])
n_elem = len(hh_arr)
if cc == 0:
hh_tot = np.zeros((n_elem,1))
hh_tot = np.insert(hh_tot,len(hh_tot.T),hh_arr,1)
hh_tot = np.array(np.delete(hh_tot,0,axis=1))
for kk in xrange(len(hh_tot)):
hh_tot[kk][hh_tot[kk] == np.inf] = np.nan
hh_tot_max = [np.nanmax(hh_tot[kk]) for kk in xrange(len(hh_tot))]
hh_tot_min = [np.nanmin(hh_tot[kk]) for kk in xrange(len(hh_tot))]
D[bin_str] = [hh_tot_max,hh_tot_min]
###############################################################################
nrow_num = int(6)
ncol_num = int(3)
fig, axes = plt.subplots(nrows=nrow_num, ncols=ncol_num, \
figsize=(150,200), sharex= True,sharey=True)
axes_flat = axes.flatten()
fig.text(0.02, 0.5,r'$\log\ (\frac{N_{gal}}{N_{total}*dlogM_{*}})$', ha='center',
va='center',rotation='vertical',fontsize=20)
for ii in range(len(mass_dat)):
zz = 0
for jj in range(len(neigh_vals)):
for hh in range(3):
# upper = C['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][0]
# lower = C['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][1]
# upper_2 = D['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][0]
# lower_2 = D['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][1]
# plot_bands(bin_centers,upper,lower,axes_flat[zz])
# plot_bands(bin_centers,upper_2,lower_2,axes_flat[zz])
plot_hists(mass_dat[ii][neigh_vals[jj]],neigh_vals[jj],bins,dlogM,\
axes_flat[zz], hh, zz)
if ii == 0:
plot_eco_hists(eco_mass_dat[jj],bins,dlogM,\
axes_flat[zz],hh,zz)
zz += int(1)
plt.subplots_adjust(left=0.07, bottom=0.09, right=0.98, top=0.98,\
hspace=0, wspace=0)
plt.show()
###############################################################################
def schechter_log_func(stellar_mass,phi_star,alpha,m_star):
"""
Returns a plottable Schechter function for the
stellar mass functions of galaxies
Parameters
----------
stellar_mass: array-like
An array of unlogged stellar mass values which
will eventually be the x-axis values the function
is plotted against
    phi_star: float-like
        Normalization constant that sets the overall
        amplitude of the function; moves the curve up and down
    alpha: float-like (typically negative)
        The faint-end, or in this case, low-mass slope;
        describes the power-law portion of the curve
    m_star: float-like
        Unlogged value of the characteristic stellar
        mass; the "knee" of the function, where the
        power-law gives way to the exponential portion
Returns
-------
res: array-like
        Array of values prepared to be plotted on a log
scale to display the Schechter function
"""
constant = np.log(10) * phi_star
log_M_Mstar = np.log10(stellar_mass/m_star)
res = constant * 10**(log_M_Mstar * (alpha+1)) * \
np.exp(-10**log_M_Mstar)
return res
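# A hedged usage sketch (not part of the original analysis): the parameter
# values below are illustrative assumptions, chosen only to show the expected
# call signature and the shape of the returned array.
_example_masses = 10**np.linspace(9.1, 11.9, 5)   # unlogged stellar masses
_example_smf = schechter_log_func(_example_masses, 0.01, -1.05, 10**10.64)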
###############################################################################
xdata = 10**bin_centers
p0 = (1,-1.05,10**10.64)
param_arr = [[] for ii in range(len(mass_freq)+1)]
fig,axes = plt.subplots(nrows=3,ncols=3,sharex=True,sharey=True,\
figsize=(150,200))
axes_flat = axes.flatten()
##Rather than letting the loop index run past mass_freq, an if statement
##directs the final iteration to the ECO data.
for ii in range(len(mass_freq)+1):
    if ii == len(mass_freq):
ydata = eco_freq[0]
opt_v, est_cov = optimize.curve_fit(schechter_log_func,xdata,ydata,\
p0=p0,sigma=eco_freq[1])
else:
ydata = (mass_freq[ii])
opt_v, est_cov = optimize.curve_fit(schechter_log_func,xdata,ydata,\
p0=p0)
schech_vals = schechter_log_func(10**bin_centers,opt_v[0],opt_v[1],\
opt_v[2])
param_arr[ii] = opt_v
param_arr = np.array(param_arr)
ax = axes_flat[ii]
ax.set_yscale('log')
ax.set_ylim([10**-3,10**0])
ax.set_xlim([9.1,11.9])
ax.set_yticks([10**-2,10**-1,10**0])
ax.plot(bin_centers,schech_vals,label='Schechter',color='silver')
if ii == 8:
ax.errorbar(bin_centers,ydata,yerr=eco_freq[1],color='dodgerblue',\
label='ECO')
else:
ax.plot(bin_centers,ydata,label='Mock',color='darkorchid')
if ii == 0 or ii == 8:
ax.legend(loc='best')
if ii == 7:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
plt.subplots_adjust(left=0.03, bottom=0.08, right=0.99, top=0.99,\
hspace=0,wspace=0)
plt.show()
###############################################################################
eco_low = {}
eco_high = {}
for jj in range(len(neigh_vals)):
eco_low[neigh_vals[jj]] = {}
eco_high[neigh_vals[jj]] = {}
eco_low[neigh_vals[jj]], eco_high[neigh_vals[jj]] = hist_calcs\
(eco_mass_dat[jj],bins,dlogM)
###############################################################################
# fig,ax = plt.subplots()
# ax.set_yscale('log')
# ax.plot(bin_centers,eco_low[1][2])
# plt.show()
###############################################################################
def param_finder(hist_counts,bin_centers):
"""
Parameters
----------
    hist_counts: array-like
An array with stellar mass function values which will be used in the
Schechter function parameterization
bin_centers: array-like
An array with the same number of values as hist_counts; used as
independent variable in Schechter function
Returns
-------
opt_v: array-like
Array with three values: phi_star, alpha, and M_star
res_arr: array-like
Array with two values: alpha and log_M_star
"""
xdata = 10**bin_centers
p0 = (1,-1.05,10**10.64)
opt_v,est_cov = optimize.curve_fit(schechter_log_func,xdata,\
hist_counts,p0=p0)
alpha = opt_v[1]
log_m_star = np.log10(opt_v[2])
res_arr = np.array([alpha,log_m_star])
return opt_v, res_arr
###############################################################################
###Test that param_finder is working
# opt_v,test_arr = param_finder(eco_low[1][2],bin_centers)
# schech_vals = schechter_log_func(10**bin_centers,opt_v[0],opt_v[1],\
# opt_v[2])
# #### The error (sigma) weighting isn't working; curve_fit stops after 800 iterations.
# # opt_v,est_v = optimize.curve_fit(schechter_log_func,10**bin_centers,
# # eco_low[1][2],p0 = (1,-1.05,10**10.64),sigma=eco_low[1]['low_err'][0],\
# # absolute_sigma=True)
# fig,ax = plt.subplots()
# ax.set_yscale('log')
# ax.plot(bin_centers,eco_low[1][2])
# ax.plot(bin_centers,schech_vals)
# plt.show()
###############################################################################
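# perc_calcs: histogram a stellar-mass subsample over the shared bins and
# normalize by the subsample size and the bin width (dlogM), giving the
# stellar mass function of that subsample.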
def perc_calcs(mass,bins,dlogM):
mass_counts, edges = np.histogram(mass,bins)
mass_freq = mass_counts/float(len(mass))/dlogM
return mass_freq
###############################################################################
def deciles(mass):
dec_val = int(len(mass)/10)
res_list = [[] for bb in range(10)]
for aa in range(0,10):
if aa == 9:
res_list[aa] = mass[aa*dec_val:]
else:
res_list[aa] = mass[aa*dec_val:(aa+1)*dec_val]
return res_list
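# Illustrative note (toy numbers, not survey data): for a 25-element array,
# dec_val = 2, so the first nine deciles hold 2 elements each and the last
# decile absorbs the remaining 7.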
###############################################################################
eco_dec = {}
for cc in range(len(eco_mass_dat)):
eco_dec[neigh_vals[cc]] = deciles(eco_mass_dat[cc])
# for ii in range(len(eco_dec[1])):
# print len(eco_dec[1][ii])
eco_dec_smf = {}
for ss in neigh_vals:
eco_dec_smf[ss] = {}
for tt in range(len(eco_dec[ss])):
eco_dec_smf[ss][tt] = perc_calcs(eco_dec[ss][tt],bins,dlogM)
eco_dec_alpha = {}
eco_dec_logMstar = {}
for oo in neigh_vals:
eco_dec_alpha[oo] = []
eco_dec_logMstar[oo] = []
for pp in range(len(eco_dec[oo])):
opt_v, temp_res_arr = param_finder(eco_dec_smf[oo][pp],bin_centers)
eco_dec_alpha[oo].append(temp_res_arr[0])
eco_dec_logMstar[oo].append(temp_res_arr[1])
ten_x = range(1,11)
# fig,ax = plt.subplots()
# for ii in neigh_vals:
# ax.plot(ten_x,eco_dec_alpha[ii])
# ax.set_xlim(0,11)
# plt.show()
###############################################################################
def plot_deciles(dec_num,y_vals,ax,plot_idx,eco=False,logMstar=False,\
color='gray'):
if eco == True:
titles = [1,2,3,5,10,20]
ax.set_xlim(0,11)
if logMstar == True:
ax.set_ylim(10,12)
ax.set_yticks(np.arange(10,12,0.5))
else:
ax.set_ylim(-1.25,-1.)
ax.set_yticks(np.arange(-1.25,-1.,0.05))
ax.set_xticks(range(1,11))
title_here = 'n = {0}'.format(titles[plot_idx])
ax.text(0.05, 0.95, title_here,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=18)
if plot_idx == 4:
ax.set_xlabel('Decile',fontsize=18)
if eco == True:
ax.plot(dec_num,y_vals,marker='o',color=color,linewidth=2.5,\
markeredgecolor=color)
else:
ax.plot(dec_num,y_vals,color=color,alpha=0.5)
###############################################################################
# nrow_num_mass = int(2)
# ncol_num_mass = int(3)
# fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
# figsize=(100,200), sharex= True, sharey = True)
# axes_flat = axes.flatten()
# zz = int(0)
# while zz <=5:
# ii = neigh_vals[zz]
# plot_deciles(ten_x,eco_dec_logMstar[ii],axes_flat[zz],zz,eco=True,\
# logMstar=True)
# zz += 1
# plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
# hspace=0,wspace=0)
# plt.show()
###############################################################################
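# quartiles: split a mass array into four consecutive, equal-sized chunks,
# with any remainder folded into the final quartile.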
def quartiles(mass):
dec_val = int(len(mass)/4)
res_list = [[] for bb in range(4)]
for aa in range(0,4):
if aa == 3:
res_list[aa] = mass[aa*dec_val:]
else:
res_list[aa] = mass[aa*dec_val:(aa+1)*dec_val]
return res_list
###############################################################################
eco_quarts = {}
for cc in range(len(eco_mass_dat)):
eco_quarts[neigh_vals[cc]] = quartiles(eco_mass_dat[cc])
eco_quarts_smf = {}
for ss in neigh_vals:
eco_quarts_smf[ss] = {}
for tt in range(len(eco_quarts[ss])):
eco_quarts_smf[ss][tt] = perc_calcs(eco_quarts[ss][tt],bins,dlogM)
eco_quarts_alpha = {}
eco_quarts_logMstar = {}
for oo in neigh_vals:
eco_quarts_alpha[oo] = []
eco_quarts_logMstar[oo] = []
for pp in range(len(eco_quarts[oo])):
opt_v, temp_res_arr = param_finder(eco_quarts_smf[oo][pp],bin_centers)
eco_quarts_alpha[oo].append(temp_res_arr[0])
eco_quarts_logMstar[oo].append(temp_res_arr[1])
quart_x = range(1,5)
# fig,ax = plt.subplots()
# for ii in neigh_vals:
# ax.plot(quart_x,eco_quarts_alpha[ii])
# ax.set_xlim(0,5)
# plt.show()
###############################################################################
def plot_quartiles(quart_num,y_vals,ax,plot_idx,eco=False,logMstar=False,\
color='gray'):
if eco == True:
titles = [1,2,3,5,10,20]
ax.set_xlim(0,5)
if logMstar == True:
ax.set_ylim(10,12)
ax.set_yticks(np.arange(10,12,0.5))
else:
ax.set_ylim(-1.2,-1.)
ax.set_yticks(np.arange(-1.2,-1.,0.04))
ax.set_xticks(range(1,5))
title_here = 'n = {0}'.format(titles[plot_idx])
ax.text(0.05, 0.95, title_here,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=18)
if plot_idx == 4:
ax.set_xlabel('Quartiles',fontsize=18)
if eco == True:
ax.plot(quart_num,y_vals,marker='o',color=color,linewidth=2,\
markeredgecolor=color)
else:
ax.plot(quart_num,y_vals,color=color)
###############################################################################
# nrow_num_mass = int(2)
# ncol_num_mass = int(3)
# fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
# figsize=(100,200), sharex= True, sharey = True)
# axes_flat = axes.flatten()
# zz = int(0)
# while zz <=5:
# ii = neigh_vals[zz]
# plot_quartiles(quart_x,eco_quarts_logMstar[ii],axes_flat[zz],zz,eco=True,\
# logMstar=True)
# zz += 1
# plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
# hspace=0,wspace=0)
# plt.show()
###############################################################################
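# quart_finder: for a single mock catalogue, split each neighbor-number's
# mass array into quartiles, build each quartile's stellar mass function with
# perc_calcs, and fit Schechter parameters (alpha, log M*) via param_finder.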
def quart_finder(mass,bins,dlogM,neigh_vals):
quarts = {}
for ii in neigh_vals:
quarts[ii] = quartiles(mass[ii])
quarts_smf = {}
for ss in neigh_vals:
quarts_smf[ss] = {}
for tt in range(len(quarts[ss])):
quarts_smf[ss][tt] = perc_calcs(quarts[ss][tt],bins,dlogM)
quarts_alpha = {}
quarts_logMstar = {}
for oo in neigh_vals:
quarts_alpha[oo] = []
quarts_logMstar[oo] = []
for pp in range(len(quarts[oo])):
opt_v, temp_res_arr = param_finder(quarts_smf[oo][pp],bin_centers)
quarts_alpha[oo].append(temp_res_arr[0])
quarts_logMstar[oo].append(temp_res_arr[1])
return quarts_alpha, quarts_logMstar
###############################################################################
mock_quarts_alpha_dict = {}
mock_quarts_logMstar_dict = {}
for jj in range(len(mass_dat)):
mock_quarts_alpha_dict[jj], mock_quarts_logMstar_dict[jj] = quart_finder\
(mass_dat[jj],bins,dlogM,neigh_vals)
###############################################################################
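# dec_finder: identical to quart_finder, but splitting into deciles rather
# than quartiles.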
def dec_finder(mass,bins,dlogM,neigh_vals):
decs = {}
for ii in neigh_vals:
decs[ii] = deciles(mass[ii])
decs_smf = {}
for ss in neigh_vals:
decs_smf[ss] = {}
for tt in range(len(decs[ss])):
decs_smf[ss][tt] = perc_calcs(decs[ss][tt],bins,dlogM)
decs_alpha = {}
decs_logMstar = {}
for oo in neigh_vals:
decs_alpha[oo] = []
decs_logMstar[oo] = []
for pp in range(len(decs[oo])):
opt_v, temp_res_arr = param_finder(decs_smf[oo][pp],bin_centers)
decs_alpha[oo].append(temp_res_arr[0])
decs_logMstar[oo].append(temp_res_arr[1])
return decs_alpha, decs_logMstar
###############################################################################
mock_dec_alpha_dict = {}
mock_dec_logMstar_dict = {}
for jj in range(len(mass_dat)):
mock_dec_alpha_dict[jj], mock_dec_logMstar_dict[jj] = dec_finder\
(mass_dat[jj],bins,dlogM,neigh_vals)
###############################################################################
###quartiles logMstar
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, '$\log\ (M_{*}/M_{\odot})$', ha='center', \
va='center',rotation='vertical',fontsize=20)
zz = int(0)
while zz <=5:
ii = neigh_vals[zz]
for ff in range(len(mass_dat)):
plot_quartiles(quart_x,mock_quarts_logMstar_dict[ff][ii],axes_flat[zz],\
zz,logMstar=True)
plot_quartiles(quart_x,eco_quarts_logMstar[ii],axes_flat[zz],zz,\
logMstar=True,color='crimson',eco=True)
zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
hspace=0,wspace=0)
plt.show()
###############################################################################
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, '$\log\ (M_{*}/M_{\odot})$', ha='center', \
va='center',rotation='vertical',fontsize=20)
zz = int(0)
while zz <=5:
ii = neigh_vals[zz]
for ff in range(len(mass_dat)):
plot_deciles(ten_x,mock_dec_logMstar_dict[ff][ii],axes_flat[zz],\
zz,logMstar=True)
plot_deciles(ten_x,eco_dec_logMstar[ii],axes_flat[zz],zz,\
logMstar=True,color='crimson',eco=True)
zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
hspace=0,wspace=0)
plt.show()
###############################################################################
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, r'$\alpha$', ha='center', \
va='center',rotation='vertical',fontsize=25)
zz = int(0)
while zz <=5:
ii = neigh_vals[zz]
for ff in range(len(mass_dat)):
plot_deciles(ten_x,mock_dec_alpha_dict[ff][ii],axes_flat[zz],zz,\
logMstar=False)
plot_deciles(ten_x,eco_dec_alpha[ii],axes_flat[zz],zz,\
logMstar=False,color='crimson',eco=True)
zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
hspace=0,wspace=0)
plt.show()
###############################################################################
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, r'$\alpha$', ha='center', \
va='center',rotation='vertical',fontsize=25)
zz = int(0)
while zz <=5:
ii = neigh_vals[zz]
for ff in range(len(mass_dat)):
plot_quartiles(quart_x,mock_quarts_alpha_dict[ff][ii],axes_flat[zz],zz,\
logMstar=False)
plot_quartiles(quart_x,eco_quarts_alpha[ii],axes_flat[zz],zz,\
logMstar=False,color='crimson',eco=True)
zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
hspace=0,wspace=0)
plt.show()
###############################################################################
def schechter_real_func(mean_of_mass_bin,bins,phi_star,alpha,Mstar):
    # integrate.quad needs a callable, so wrap the unlogged Schechter
    # function in a lambda and integrate it over each mass bin
    schech_integrand = lambda mass: phi_star/Mstar * (mass/Mstar)**(alpha+1) \
        * np.exp(-mass/Mstar)
    int_val = [[] for ii in range(len(mean_of_mass_bin))]
    for ii in range(len(mean_of_mass_bin)):
        int_val[ii] = integrate.quad(schech_integrand,bins[ii],bins[ii+1])[0]
    return int_val
###############################################################################
###############################################################################
###############################################################################
def find_params(bin_int,mean_mass,count_err):
"""
    Parameters
    ----------
    bin_int: array-like
        Integrated Schechter-function counts in each mass bin
    mean_mass: array-like
        Mean log stellar mass of each bin (unlogged internally via 10**)
    count_err: array-like
        Uncertainties on the bin counts, passed to curve_fit as sigma
    Returns
-------
opt_v: array-like
Array with three values: phi_star, alpha, and M_star
res_arr: array-like
Array with two values: alpha and log_M_star
"""
xdata = 10**mean_mass
p0 = (1,-1.05,10**10.64)
opt_v,est_cov = optimize.curve_fit(schechter_real_func,xdata,\
bin_int,p0=p0,sigma=count_err)
alpha = opt_v[1]
log_m_star = np.log10(opt_v[2])
res_arr = np.array([alpha,log_m_star])
return opt_v, res_arr
###############################################################################
###############################################################################
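# truth_vals[mock][n] flags, galaxy by galaxy, whether the nth nearest
# neighbor shares the galaxy's halo (True where the halo IDs match).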
truth_vals = {}
for ii in range(len(halo_id_arr)):
truth_vals[ii] = {}
for jj in neigh_vals:
halo_id_neigh = halo_id_arr[ii][nn_neigh_idx[ii].T[neigh_dict[jj]]].values
truth_vals[ii][jj] = halo_id_neigh==halo_id_arr[ii].values
###############################################################################
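# halo_frac[mock][n]: for each stellar-mass bin, the fraction of galaxies
# whose nth nearest neighbor lies in the same halo.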
halo_frac = {}
for ii in range(len(mass_arr)):
halo_frac[ii] = {}
mass_binning = np.digitize(mass_arr[ii],bins)
bins_to_use = list(np.unique(mass_binning))
if (len(bins)-1) not in bins_to_use:
bins_to_use.append(len(bins)-1)
if len(bins) in bins_to_use:
bins_to_use.remove(len(bins))
for jj in neigh_vals:
one_zero = truth_vals[ii][jj].astype(int)
frac = []
for xx in bins_to_use:
truth_binning = one_zero[mass_binning==xx]
num_in_bin = len(truth_binning)
if num_in_bin == 0:
num_in_bin = np.nan
num_same_halo = np.count_nonzero(truth_binning==1)
frac.append(num_same_halo/(1.*num_in_bin))
halo_frac[ii][jj] = frac
###############################################################################
nn_dict = {1:0,2:1,3:2,5:3,10:4,20:5}
mean_mock_halo_frac = {}
for ii in neigh_vals:
for jj in range(len(halo_frac)):
bin_str = '{0}'.format(ii)
oo_arr = halo_frac[jj][ii]
n_o_elem = len(oo_arr)
if jj == 0:
oo_tot = np.zeros((n_o_elem,1))
oo_tot = np.insert(oo_tot,len(oo_tot.T),oo_arr,1)
oo_tot = np.array(np.delete(oo_tot,0,axis=1))
oo_tot_mean = [np.mean(oo_tot[uu]) for uu in xrange(len(oo_tot))]
oo_tot_std = [np.std(oo_tot[uu])/np.sqrt(len(halo_frac)) for uu in \
xrange(len(oo_tot))]
mean_mock_halo_frac[bin_str] = [oo_tot_mean,oo_tot_std]
###############################################################################
def plot_halo_frac(bin_centers,y_vals,ax,plot_idx):
titles = [1,2,3,5,10,20]
ax.set_xlim(9.1,11.9)
ax.set_xticks(np.arange(9.5,12.,0.5))
ax.tick_params(axis='x', which='major', labelsize=16)
title_here = 'n = {0}'.format(titles[plot_idx])
ax.text(0.05, 0.95, title_here,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=18)
if plot_idx == 4:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=20)
ax.plot(bin_centers,y_vals,color='silver')
def plot_mean_halo_frac(bin_centers,mean_vals,ax,std):
ax.errorbar(bin_centers,mean_vals,yerr=std,color='deeppink')
###############################################################################
nrow = int(2)
ncol = int(3)
fig,axes = plt.subplots(nrows=nrow,ncols=ncol,\
figsize=(100,200),sharex=True)
axes_flat = axes.flatten()
zz = int(0)
while zz <=4:
for jj in neigh_vals:
for kk in range(len(halo_frac)):
plot_halo_frac(bin_centers,halo_frac[kk][jj],axes_flat[zz],zz)
nn_str = '{0}'.format(jj)
plot_mean_halo_frac(bin_centers,mean_mock_halo_frac[nn_str][0],axes_flat[zz],mean_mock_halo_frac[nn_str][1])
zz += 1
plt.subplots_adjust(top=0.97,bottom=0.1,left=0.03,right=0.99,hspace=0.10,wspace=0.12)
plt.show()
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
##Creating dictionaries through for loops to house the parameters for each of
#ECO's 18 different options (6 nn and 3 density cuts)
#One dictionary for the lower portion of the cuts and one for the higher
param_dict_low = {}
param_dict_high = {}
for dd in neigh_vals:
param_dict_low[dd] = {}
param_dict_high[dd] = {}
for ee in frac_vals:
param_dict_low[dd][ee] = {}
param_dict_high[dd][ee] = {}
opt_v, param_dict_low[dd][ee] = param_finder(eco_low[dd][ee],\
bin_centers)
opt_v, param_dict_high[dd][ee] = param_finder(eco_high[dd][ee],\
bin_centers)
# #### Putting the percentile cuts in order, as seen below
# #10,25,low_50,high_50,75,90
# over_alpha_dict = {}
# over_log_m_star = {}
# for dd in neigh_vals:
# temp_list_alpha = []
# temp_list_logMstar = []
# over_alpha_dict[dd] = {}
# over_log_m_star[dd] = {}
# low_idx = np.array(list(reversed(np.sort(param_dict_low[dd].keys()))))
# high_idx = np.sort(param_dict_high[dd].keys())
# for ff in range(len(low_idx)):
# temp_list_alpha.append(param_dict_low[dd][low_idx[ff]][0])
# temp_list_logMstar.append(param_dict_low[dd][low_idx[ff]][1])
# for ff in range(len(high_idx)):
# temp_list_alpha.append(param_dict_high[dd][high_idx[ff]][0])
# temp_list_logMstar.append(param_dict_high[dd][high_idx[ff]][1])
# over_alpha_dict[dd] = temp_list_alpha
# over_log_m_star[dd] = temp_list_logMstar
# perc_arr = (10,25,49,51,75,90)
# fig,ax = plt.subplots()
# for jj in neigh_vals:
# ax.plot(perc_arr,over_log_m_star[jj],marker='o',label='{0}'.format(jj), \
# linestyle='--')
# ax.set_xlim([0,100])
# ax.legend(loc='best', numpoints=1)
# ax.set_xlabel('Percentile')
# ax.set_ylabel(r'$\log\ M_{*}$')
# plt.show()
# fig,ax = plt.subplots()
# for jj in neigh_vals:
# ax.plot(perc_arr,over_alpha_dict[jj],marker='o',label='{0}'.format(jj), \
# linestyle='--')
# ax.set_xlim([0,100])
# ax.legend(loc='best', numpoints=1)
# ax.set_xlabel('Percentile')
# ax.set_ylabel(r'$\alpha$')
# plt.show()
### moving around the parameters so that I can find the differences, rather
#than just plotting them straight up
# diff_dict_m_star = {}
# diff_dict_alpha = {}
# for dd in neigh_vals:
# diff_dict_m_star[dd] = {}
# diff_dict_alpha[dd] = {}
# for jj in frac_vals:
# temp_list_diff_m_star = []
# temp_list_diff_alpha = []
# diff_dict_alpha[dd][jj] = {}
# diff_dict_m_star[dd][jj] = {}
# temp_list_diff_m_star.append((param_dict_high[dd][jj][1] - \
# param_dict_low[dd][jj][1]))
# temp_list_diff_alpha.append(((param_dict_high[dd][jj][0]-\
# param_dict_low[dd][jj][0])/param_dict_high[dd][jj][0] * 100))
# diff_dict_alpha[dd][jj] = np.array(temp_list_diff_alpha)
# diff_dict_m_star[dd][jj] = np.array(temp_list_diff_m_star)
# dict_revamp_mstar = {}
# for dd in neigh_vals:
# dict_revamp_mstar[dd] = []
# for jj in frac_vals:
# dict_revamp_mstar[dd].append(diff_dict_m_star[dd][jj])
# dict_revamp_alpha = {}
# for dd in neigh_vals:
# dict_revamp_alpha[dd] = []
# for jj in frac_vals:
# dict_revamp_alpha[dd].append(diff_dict_alpha[dd][jj])
# discrete_x = np.array([1,2,3])
# fig,ax = plt.subplots()
# for ii in neigh_vals:
# ax.plot(discrete_x,dict_revamp_mstar[ii],marker='o',\
# linestyle= '--',label='{0}'.format(ii))
# ax.set_xlim(0,4)
# ax.set_xlabel('Fractional Cut',fontsize=18)
# ax.set_xticks([1,2,3])
# ax.set_ylabel('Difference in $\log\ M_{*}$, h-l',fontsize=18)
# ax.legend(loc='best',numpoints=1)
# ax.text(1,0.5,'50/50 Cut',horizontalalignment='center')
# ax.text(2,0.6,'25/75 Cut',horizontalalignment='center')
# ax.text(3,0.75,'10/90 Cut',horizontalalignment='center')
# plt.show()
# ######
# fig,ax = plt.subplots()
# for ii in neigh_vals:
# ax.plot(discrete_x,dict_revamp_alpha[ii],marker='o',\
# linestyle= '--',label='{0}'.format(ii))
# ax.set_xlim(0,4)
# ax.set_xlabel('Fractional Cut',fontsize=18)
# ax.set_xticks([1,2,3])
# ax.set_ylabel(r'Difference in $\alpha$, (h-l)/h',fontsize=18)
# ax.legend(loc='best',numpoints=1)
# ax.text(1,-7,'50/50 Cut',horizontalalignment='center')
# ax.text(2,-7,'25/75 Cut',horizontalalignment='center')
# ax.text(3,-7,'10/90 Cut',horizontalalignment='center')
# plt.show()
#50/50, 25/75, 10/90
# mocks_high_alpha = {}
# mocks_high_logMstar = {}
# mocks_low_alpha = {}
# mocks_low_logMstar = {}
# for rr in xrange(len(hist_high_info)):
# mocks_high_alpha[rr] = {}
# mocks_high_logMstar[rr] = {}
# mocks_low_alpha[rr] = {}
# mocks_low_logMstar[rr] = {}
# for ss in neigh_vals:
# mocks_high_alpha[rr][ss] = {}
# mocks_high_logMstar[rr][ss] = {}
# mocks_low_alpha[rr][ss] = {}
# mocks_low_logMstar[rr][ss] = {}
# for tt in frac_vals:
# opt_v, temp_res_high = param_finder(hist_high_info[rr][ss][tt],\
# bin_centers)
# opt_v, temp_res_low = param_finder(hist_low_info[rr][ss][tt],\
# bin_centers)
# mocks_high_alpha[rr][ss][tt] = temp_res_high[0]
# mocks_high_logMstar[rr][ss][tt] = temp_res_high[1]
# mocks_low_alpha[rr][ss][tt] = temp_res_low[0]
# mocks_low_logMstar[rr][ss][tt] = temp_res_low[1] | mit |
xujun10110/folium | tests/folium_tests.py | 1 | 21100 | # -*- coding: utf-8 -*-
"""
Folium Tests
-------
"""
import json
import mock
import pandas as pd
import nose.tools as nt
import jinja2
from jinja2 import Environment, PackageLoader
import vincent
import folium
from folium.six import PY3
def setup_data():
"""Import economic data for testing."""
with open('us-counties.json', 'r') as f:
get_id = json.load(f)
county_codes = [x['id'] for x in get_id['features']]
county_df = pd.DataFrame({'FIPS_Code': county_codes}, dtype=str)
# Read into Dataframe, cast to string for consistency.
df = pd.read_csv('us_county_data.csv', na_values=[' '])
df['FIPS_Code'] = df['FIPS_Code'].astype(str)
# Perform an inner join, pad NA's with data from nearest county.
merged = pd.merge(df, county_df, on='FIPS_Code', how='inner')
return merged.fillna(method='pad')
def test_get_templates():
"""Test template getting."""
env = folium.utilities.get_templates()
nt.assert_is_instance(env, jinja2.environment.Environment)
class testFolium(object):
"""Test class for the Folium library."""
def setup(self):
"""Setup Folium Map."""
with mock.patch('folium.folium.uuid4') as uuid4:
uuid4().hex = '0' * 32
self.map = folium.Map(location=[45.5236, -122.6750], width=900,
height=400, max_zoom=20, zoom_start=4)
self.env = Environment(loader=PackageLoader('folium', 'templates'))
def test_init(self):
"""Test map initialization."""
assert self.map.map_type == 'base'
assert self.map.mark_cnt == {}
assert self.map.location == [45.5236, -122.6750]
assert self.map.map_size == {'width': 900, 'height': 400}
tmpl = {'Tiles': 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
'attr': ('Map data (c) <a href="http://openstreetmap.org">'
'OpenStreetMap</a> contributors'),
'map_id': 'folium_' + '0' * 32,
'lat': 45.5236,
'lon': -122.675,
'max_zoom': 20,
'size': 'style="width: 900px; height: 400px"',
'zoom_level': 4,
'tile_layers': [],
'wms_layers': [],
'image_layers': [],
'min_zoom': 1,
'min_lat': -90,
'max_lat': 90,
'min_lon': -180,
'max_lon': 180}
assert self.map.template_vars == tmpl
def test_cloudmade(self):
"""Test cloudmade tiles and the API key."""
nt.assert_raises(ValueError, callableObj=folium.Map,
location=[45.5236, -122.6750], tiles='cloudmade')
map = folium.Map(location=[45.5236, -122.6750], tiles='cloudmade',
API_key='###')
assert map.template_vars['Tiles'] == ('http://{s}.tile.cloudmade.com'
'/###/997/256/{z}/{x}/{y}.png')
def test_builtin_tile(self):
"""Test custom maptiles."""
default_tiles = ['OpenStreetMap', 'Stamen Terrain', 'Stamen Toner']
for tiles in default_tiles:
map = folium.Map(location=[45.5236, -122.6750], tiles=tiles)
tiles = ''.join(tiles.lower().strip().split())
url = map.tile_types[tiles]['templ'].render()
attr = map.tile_types[tiles]['attr'].render()
assert map.template_vars['Tiles'] == url
assert map.template_vars['attr'] == attr
def test_custom_tile(self):
"""Test custom tile URLs."""
url = 'http://{s}.custom_tiles.org/{z}/{x}/{y}.png'
attr = 'Attribution for custom tiles'
nt.assert_raises(ValueError, callableObj=folium.Map,
location=[45.5236, -122.6750], tiles=url)
map = folium.Map(location=[45.52, -122.67], tiles=url, attr=attr)
assert map.template_vars['Tiles'] == url
assert map.template_vars['attr'] == attr
def test_wms_layer(self):
"""Test WMS layer URLs."""
map = folium.Map(location=[44, -73], zoom_start=3)
wms_url = 'http://gis.srh.noaa.gov/arcgis/services/NDFDTemps/'
wms_url += 'MapServer/WMSServer'
wms_name = "Temperature"
wms_layers = 16
wms_format = "image/png"
map.add_wms_layer(wms_name=wms_name,
wms_url=wms_url,
wms_format=wms_format,
wms_layers=wms_layers,
wms_transparent=True)
wms_temp = self.env.get_template('wms_layer.js')
wms = wms_temp.render({'wms_name': wms_name,
'wms_url': wms_url,
'wms_format': wms_format,
'wms_layer_names': wms_layers,
'wms_transparent': 'true'})
assert map.template_vars['wms_layers'][0] == wms
def test_simple_marker(self):
"""Test simple marker addition."""
mark_templ = self.env.get_template('simple_marker.js')
popup_templ = self.env.get_template('simple_popup.js')
# Single Simple marker.
self.map.simple_marker(location=[45.50, -122.7])
mark_1 = mark_templ.render({'marker': 'marker_1', 'lat': 45.50,
'lon': -122.7,
'icon': "{'icon':marker_1_icon}"})
assert self.map.template_vars['custom_markers'][0][1] == mark_1
assert self.map.template_vars['custom_markers'][0][2] == ""
# Test Simple marker addition.
self.map.simple_marker(location=[45.60, -122.8], popup='Hi')
mark_2 = mark_templ.render({'marker': 'marker_2', 'lat': 45.60,
'lon': -122.8,
'icon': "{'icon':marker_2_icon}"})
popup_2 = popup_templ.render({'pop_name': 'marker_2',
'pop_txt': json.dumps('Hi'),
'width': 300})
assert self.map.mark_cnt['simple'] == 2
assert self.map.template_vars['custom_markers'][1][1] == mark_2
assert self.map.template_vars['custom_markers'][1][2] == popup_2
# Test no popup.
self.map.simple_marker(location=[45.60, -122.8])
nopopup = ''
assert self.map.template_vars['custom_markers'][2][2] == nopopup
def test_circle_marker(self):
"""Test circle marker additions."""
circ_templ = self.env.get_template('circle_marker.js')
# Single Circle marker.
self.map.circle_marker(location=[45.60, -122.8], popup='Hi')
circle_1 = circ_templ.render({'circle': 'circle_1', 'lat': 45.60,
'lon': -122.8, 'radius': 500,
'line_color': 'black',
'fill_color': 'black',
'fill_opacity': 0.6})
assert self.map.template_vars['markers'][0][0] == circle_1
# Second circle marker.
self.map.circle_marker(location=[45.70, -122.9], popup='Hi')
circle_2 = circ_templ.render({'circle': 'circle_2', 'lat': 45.70,
'lon': -122.9, 'radius': 500,
'line_color': 'black',
'fill_color': 'black',
'fill_opacity': 0.6})
assert self.map.template_vars['markers'][1][0] == circle_2
def test_poly_marker(self):
"""Test polygon marker."""
poly_temp = self.env.get_template('poly_marker.js')
polygon = poly_temp.render({'marker': 'polygon_1',
'lat': 45.5,
'lon': -122.5,
'line_color': 'black',
'line_opacity': 1,
'line_weight': 2,
'fill_color': 'blue',
'fill_opacity': 1,
'num_sides': 4,
'rotation': 0,
'radius': 15})
self.map.polygon_marker(location=[45.5, -122.5])
assert self.map.template_vars['markers'][0][0] == polygon
def test_latlng_pop(self):
"""Test lat/lon popovers."""
self.map.lat_lng_popover()
pop_templ = self.env.get_template('lat_lng_popover.js').render()
assert self.map.template_vars['lat_lng_pop'] == pop_templ
def test_click_for_marker(self):
"""Test click for marker functionality."""
# Lat/lon popover.
self.map.click_for_marker()
click_templ = self.env.get_template('click_for_marker.js')
click = click_templ.render({'popup': ('"Latitude: " + lat + "<br>'
'Longitude: " + lng ')})
assert self.map.template_vars['click_pop'] == click
# Custom popover.
self.map.click_for_marker(popup='Test')
click_templ = self.env.get_template('click_for_marker.js')
click = click_templ.render({'popup': '"Test"'})
assert self.map.template_vars['click_pop'] == click
def test_vega_popup(self):
"""Test vega popups."""
vis = vincent.Bar(width=675 - 75, height=350 - 50, no_data=True)
self.map.simple_marker(location=[45.60, -122.8],
popup=(vis, 'vis.json'))
popup_temp = self.env.get_template('vega_marker.js')
vega = popup_temp.render({'mark': 'marker_1', 'div_id': 'vis',
'width': 675, 'height': 350,
'max_width': 900,
'json_out': 'vis.json',
'vega_id': '#vis'})
assert self.map.template_vars['custom_markers'][0][2] == vega
def test_geo_json(self):
"""Test geojson method."""
path = 'us-counties.json'
geo_path = ".defer(d3.json, '{0}')".format(path)
# No data binding.
self.map.geo_json(geo_path=path)
geo_path = ".defer(d3.json, '{0}')".format(path)
map_var = 'gjson_1'
layer_var = 'gjson_1'
style_temp = self.env.get_template('geojson_style.js')
style = style_temp.render({'style': 'style_1',
'line_color': 'black',
'line_weight': 1,
'line_opacity': 1,
'fill_color': 'blue',
'fill_opacity': 0.6})
layer = ('gJson_layer_{0} = L.geoJson({1}, {{style: {2},'
'onEachFeature: onEachFeature}}).addTo(map)'
.format(1, layer_var, 'style_1'))
templ = self.map.template_vars
assert self.map.map_type == 'geojson'
assert templ['func_vars'][0] == map_var
assert templ['geo_styles'][0] == style
assert templ['gjson_layers'][0] == layer
assert templ['json_paths'][0] == geo_path
# Data binding incorrect color value error.
data = setup_data()
nt.assert_raises(ValueError, self.map.geo_json,
path, data=data,
columns=['FIPS_Code', 'Unemployed_2011'],
key_on='feature.id', fill_color='blue')
# Data binding threshold_scale too long.
data = setup_data()
nt.assert_raises(ValueError, self.map.geo_json,
path, data=data,
columns=['FIPS_Code', 'Unemployed_2011'],
key_on='feature.id',
threshold_scale=[1, 2, 3, 4, 5, 6, 7],
fill_color='YlGnBu')
# With DataFrame data binding, default threshold scale.
self.map.geo_json(geo_path=path, data=data,
columns=['FIPS_Code', 'Unemployed_2011'],
key_on='feature.id', fill_color='YlGnBu',
reset=True)
geo_path = ".defer(d3.json, '{0}')".format(path)
data_path = ".defer(d3.json, '{0}')".format('data.json')
map_var = 'gjson_1'
layer_var = 'gjson_1'
data_var = 'data_1'
domain = [4.0, 1000.0, 3000.0, 5000.0, 9000.0]
palette = folium.utilities.color_brewer('YlGnBu')
d3range = palette[0: len(domain) + 1]
color_temp = self.env.get_template('d3_threshold.js')
scale = color_temp.render({'domain': domain,
'range': d3range})
style_temp = self.env.get_template('geojson_style.js')
color = 'color(matchKey(feature.id, data_1))'
style = style_temp.render({'style': 'style_1',
'line_color': 'black',
'line_weight': 1,
'line_opacity': 1,
'quantize_fill': color,
'fill_opacity': 0.6})
layer = ('gJson_layer_{0} = L.geoJson({1}, {{style: {2},'
'onEachFeature: onEachFeature}}).addTo(map)'
.format(1, layer_var, 'style_1'))
templ = self.map.template_vars
assert templ['func_vars'] == [data_var, map_var]
assert templ['geo_styles'][0] == style
assert templ['gjson_layers'][0] == layer
assert templ['json_paths'] == [data_path, geo_path]
assert templ['color_scales'][0] == scale
# Adding TopoJSON as additional layer.
path_2 = 'or_counties_topo.json'
self.map.geo_json(geo_path=path_2, topojson='objects.or_counties_geo')
geo_path_2 = ".defer(d3.json, '{0}')".format(path_2)
map_var_2 = 'tjson_2'
layer_var_2 = 'topo_2'
topo_func = ('topo_2 = topojson.feature(tjson_2,'
' tjson_2.objects.or_counties_geo);')
fmt = ('gJson_layer_{0} = L.geoJson({1}, {{style: {2},'
'onEachFeature: onEachFeature}}).addTo(map)')
layer_2 = fmt.format(2, layer_var_2, 'style_2')
templ = self.map.template_vars
assert templ['func_vars'] == [data_var, map_var, map_var_2]
assert templ['gjson_layers'][1] == layer_2
assert templ['json_paths'] == [data_path, geo_path, geo_path_2]
assert templ['topo_convert'][0] == topo_func
def test_map_build(self):
"""Test map build."""
# Standard map.
self.map._build_map()
html_templ = self.env.get_template('fol_template.html')
tmpl = {'Tiles': 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
'attr': ('Map data (c) <a href="http://openstreetmap.org">'
'OpenStreetMap</a> contributors'),
'map_id': 'folium_' + '0' * 32,
'lat': 45.5236, 'lon': -122.675, 'max_zoom': 20,
'size': 'style="width: 900px; height: 400px"',
'zoom_level': 4,
'min_zoom': 1,
'min_lat': -90,
'max_lat': 90,
'min_lon': -180,
'max_lon': 180}
HTML = html_templ.render(tmpl)
assert self.map.HTML == HTML
def test_tile_attr_unicode(self):
"""Test tile attribution unicode
        The b'юникод' case is not covered here because in Python 3
        bytes literals may only contain ASCII characters.
"""
if not PY3:
map = folium.Map(location=[45.5236, -122.6750],
tiles='test', attr=b'unicode')
map._build_map()
else:
map = folium.Map(location=[45.5236, -122.6750],
tiles='test', attr=u'юникод')
map._build_map()
map = folium.Map(location=[45.5236, -122.6750],
tiles='test', attr='юникод')
map._build_map()
def test_create_map(self):
"""Test create map."""
map = folium.Map(location=[45.5236, -122.6750],
tiles='test', attr='юникод')
# Add json data.
path = 'us-counties.json'
data = setup_data()
map.geo_json(geo_path=path, data=data,
columns=['FIPS_Code', 'Unemployed_2011'],
key_on='feature.id', fill_color='YlGnBu',
reset=True)
# Add plugins.
map.polygon_marker(location=[45.5, -122.5])
# Test write.
map.create_map()
def test_line(self):
"""Test line."""
line_temp = self.env.get_template('polyline.js')
line_opts = {
'color': 'blue',
'weight': 2,
'opacity': 1
}
locations = [
[[45.5236, -122.6750], [45.5236, -122.6751]],
[[45.5237, -122.6750], [45.5237, -122.6751]],
[[45.5238, -122.6750], [45.5238, -122.6751]]
]
line_rendered = line_temp.render({'line': 'line_1',
'locations': locations,
'options': line_opts})
self.map.line(locations=locations,
line_color=line_opts['color'],
line_weight=line_opts['weight'],
line_opacity=line_opts['opacity'])
assert self.map.template_vars['lines'][0][0] == line_rendered
def test_multi_polyline(self):
"""Test multi_polyline."""
multiline_temp = self.env.get_template('multi_polyline.js')
multiline_opts = {'color': 'blue',
'weight': 2,
'opacity': 1}
locations = [[[45.5236, -122.6750], [45.5236, -122.6751]],
[[45.5237, -122.6750], [45.5237, -122.6751]],
[[45.5238, -122.6750], [45.5238, -122.6751]]]
multiline_rendered = multiline_temp.render({'multiline': 'multiline_1',
'locations': locations,
'options': multiline_opts})
self.map.multiline(locations=locations,
line_color=multiline_opts['color'],
line_weight=multiline_opts['weight'],
line_opacity=multiline_opts['opacity'])
assert self.map.template_vars['multilines'][0][0] == multiline_rendered
def test_fit_bounds(self):
"""Test fit_bounds."""
bounds = ((52.193636, -2.221575), (52.636878, -1.139759))
fit_bounds_tpl = self.env.get_template('fit_bounds.js')
fit_bounds_rendered = fit_bounds_tpl.render({
'bounds': json.dumps(bounds),
'fit_bounds_options': {}, })
self.map.fit_bounds(bounds)
assert self.map.template_vars['fit_bounds'] == fit_bounds_rendered
fit_bounds_tpl = self.env.get_template('fit_bounds.js')
fit_bounds_rendered = fit_bounds_tpl.render({
'bounds': json.dumps(bounds),
'fit_bounds_options': json.dumps({'maxZoom': 15,
'padding': (3, 3), }, sort_keys=True),
})
self.map.fit_bounds(bounds, max_zoom=15, padding=(3, 3))
assert self.map.template_vars['fit_bounds'] == fit_bounds_rendered
def test_image_overlay(self):
"""Test image overlay"""
from numpy.random import random
from folium.utilities import write_png
import base64
data = random((100,100))
png_str = write_png(data)
with open('data.png', 'wb') as f:
f.write(png_str)
inline_image_url = "data:image/png;base64,"+base64.b64encode(png_str).decode('utf-8')
image_tpl = self.env.get_template('image_layer.js')
image_name = 'Image_Overlay'
image_opacity = 0.25
image_url = 'data.png'
        min_lat, max_lat, min_lon, max_lon = -90.0, 90.0, -180.0, 180.0
        image_bounds = [[min_lat, min_lon], [max_lat, max_lon]]
image_rendered = image_tpl.render({'image_name': image_name,
'image_url': image_url,
'image_bounds': image_bounds,
'image_opacity': image_opacity
})
self.map.image_overlay(data, filename=image_url)
assert image_rendered in self.map.template_vars['image_layers']
image_rendered = image_tpl.render({'image_name': image_name,
'image_url': inline_image_url,
'image_bounds': image_bounds,
'image_opacity': image_opacity
})
self.map.image_overlay(data)
assert image_rendered in self.map.template_vars['image_layers']
| mit |
shnizzedy/SM_openSMILE | openSMILE_runSM/mhealthx/mhealthx/utilities.py | 1 | 9467 | #!/usr/bin/env python
"""
Utility functions.
Authors:
- Arno Klein, 2015-2016 ([email protected]) http://binarybottle.com
Copyright 2015-2016, Sage Bionetworks (sagebase.org), with later modifications:
Copyright 2016, Child Mind Institute (childmind.org), Apache v2.0 License
"""
def run_command(command, flag1='', arg1='', flags='', args=[],
flagn='', argn='', closing=''):
"""
Run a generic command.
Parameters
----------
command : string
name of command: "SMILExtract"
flag1 : string
optional first command line flag
arg1 : string
optional first argument, handy for iterating over in the pipeline
flags : string or list of strings
command line flags precede their respective args: ["-C", "-I", "-O"]
args : string or list of strings
command line arguments: ["config.conf", "input.wav", "output.csv"]
flagn : string
optional last command line flag
argn : string
optional last argument, handy for iterating over in the pipeline
closing : string
closing string in command
Returns
-------
command_line : string
full command line
args : list of strings
command line arguments
arg1 : string
optional first argument, handy for iterating over in the pipeline
argn : string
optional last argument, handy for iterating over in the pipeline
Examples
--------
>>> from mhealthx.utilities import run_command
>>> command = 'ls'
>>> flag1 = ''
>>> arg1 = ''
>>> flags = ['-l', '']
>>> args = ['/software', '/desk']
>>> flagn = ''
>>> argn = ''
>>> closing = '' #'> test.txt'
>>> command_line, args, arg1, argn = run_command(command, flag1, arg1, flags, args, flagn, argn, closing)
"""
from nipype.interfaces.base import CommandLine
# Join flags with args:
if type(flags) == list and type(args) == list:
flag_arg_tuples = list(zip(flags, args))
flags_args = ''
for flag_arg_tuple in flag_arg_tuples:
flags_args = ' '.join([flags_args, ' '.join(flag_arg_tuple)])
elif type(flags) == str and type(args) == str:
flags_args = ' '.join([flags, args])
else:
raise IOError("-flags and -args should both be strings or lists")
options = ' '.join([' '.join([flag1, arg1]), flags_args,
' '.join([flagn, argn]), closing])
command_line = ' '.join([command, options])
# Nipype command line wrapper:
try:
cli = CommandLine(command=command)
cli.inputs.args = options
cli.cmdline
cli.run()
except:
import traceback; traceback.print_exc()
print(("'{0}' unsuccessful".format(command_line)))
return command_line, args, arg1, argn
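# A hedged sketch of the openSMILE-style call described in the parameter
# docs above; the config and file names are placeholders, not real paths:
#     command_line, args, arg1, argn = run_command(
#         'SMILExtract', flags=['-C', '-I', '-O'],
#         args=['config.conf', 'input.wav', 'output.csv'])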
def plotxyz(x, y, z, t, title='', limits=[]):
"""
Plot each accelerometer axis separately against relative time.
Parameters
----------
x : list or numpy array of floats
y : list or numpy array of floats
z : list or numpy array of floats
t : list or numpy array of floats
time points
title : string
limits : list of floats
Examples
--------
>>> from mhealthx.xio import read_accel_json
>>> #input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/accel_walking_outbound.json.items-6dc4a144-55c3-4e6d-982c-19c7a701ca243282023468470322798.tmp'
>>> input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/deviceMotion_walking_outbound.json.items-5981e0a8-6481-41c8-b589-fa207bfd2ab38771455825726024828.tmp'
>>> #input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/deviceMotion_walking_outbound.json.items-a2ab9333-6d63-4676-977a-08591a5d837f5221783798792869048.tmp'
>>> start = 150
>>> device_motion = True
>>> t, axyz, gxyz, uxyz, rxyz, sample_rate, duration = read_accel_json(input_file, start, device_motion)
>>> ax, ay, az = axyz
>>> from mhealthx.utilities import plotxyz
>>> plotxyz(ax, ay, az, t, title='', limits=[])
"""
import numpy as np
import matplotlib.pyplot as plt
t -= np.min(t)
plt.figure()
plt.subplot(3, 1, 1)
plt.plot(t, x)
if limits:
plt.ylim((limits[0], limits[1]))
plt.title('x-axis ' + title)
plt.ylabel(title)
plt.subplot(3, 1, 2)
plt.plot(t, y)
if limits:
plt.ylim((limits[0], limits[1]))
plt.title('y-axis ' + title)
plt.ylabel(title)
plt.subplot(3, 1, 3)
plt.plot(t, z)
if limits:
plt.ylim((limits[0], limits[1]))
plt.title('z-axis ' + title)
plt.xlabel('Time (s)')
plt.ylabel(title)
plt.show()
def plotxyz3d(x, y, z, title=''):
"""
Plot accelerometer readings in 3-D.
(If trouble with "projection='3d'", try: ipython --pylab)
Parameters
----------
x : list or numpy array of floats
y : list or numpy array of floats
z : list or numpy array of floats
title : string
title
Examples
--------
>>> from mhealthx.xio import read_accel_json
>>> #input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/accel_walking_outbound.json.items-6dc4a144-55c3-4e6d-982c-19c7a701ca243282023468470322798.tmp'
>>> input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/deviceMotion_walking_outbound.json.items-5981e0a8-6481-41c8-b589-fa207bfd2ab38771455825726024828.tmp'
>>> #input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/deviceMotion_walking_outbound.json.items-a2ab9333-6d63-4676-977a-08591a5d837f5221783798792869048.tmp'
>>> start = 150
>>> device_motion = True
>>> t, axyz, gxyz, uxyz, rxyz, sample_rate, duration = read_accel_json(input_file, start, device_motion)
>>> x, y, z = axyz
>>> title = 'Test vectors'
>>> from mhealthx.utilities import plotxyz3d
>>> plotxyz3d(x, y, z, title)
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca(projection='3d')
#ax.plot(x, y, z) #, marker='o'
ax.plot(x[1::], y[1::], z[1::], label='x, y, z') #, marker='o'
ax.legend()
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
#ax.set_xlim3d(0, 1)
#ax.set_ylim3d(0, 1)
#ax.set_zlim3d(0, 1)
plt.xlabel('x')
plt.ylabel('y')
plt.title(title)
plt.show()
def plot_vectors(x, y, z, hx=[], hy=[], hz=[], title=''):
"""
Plot vectors in 3-D from the origin [0,0,0].
(If trouble with "projection='3d'", try: ipython --pylab)
From: http://stackoverflow.com/questions/22867620/
putting-arrowheads-on-vectors-in-matplotlibs-3d-plot
Parameters
----------
x : list or numpy array of floats
x-axis data for vectors
y : list or numpy array of floats
y-axis data for vectors
z : list or numpy array of floats
z-axis data for vectors
hx : list or numpy array of floats
x-axis data for vectors to highlight
hy : list or numpy array of floats
y-axis data for vectors to highlight
hz : list or numpy array of floats
z-axis data for vectors to highlight
title : string
title
Examples
--------
>>> from mhealthx.xio import read_accel_json
>>> #input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/accel_walking_outbound.json.items-6dc4a144-55c3-4e6d-982c-19c7a701ca243282023468470322798.tmp'
>>> input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/deviceMotion_walking_outbound.json.items-5981e0a8-6481-41c8-b589-fa207bfd2ab38771455825726024828.tmp'
>>> #input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/deviceMotion_walking_outbound.json.items-a2ab9333-6d63-4676-977a-08591a5d837f5221783798792869048.tmp'
>>> start = 150
>>> device_motion = True
>>> t, axyz, gxyz, uxyz, rxyz, sample_rate, duration = read_accel_json(input_file, start, device_motion)
>>> x, y, z = axyz
>>> hx, hy, hz = [[0,1],[0,1],[0,1]]
>>> title = 'Test vectors'
>>> from mhealthx.utilities import plot_vectors
>>> plot_vectors(x, y, z, hx, hy, hz, title)
"""
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
fig = plt.figure()
ax = fig.gca(projection='3d')
for i, x in enumerate(x):
a = Arrow3D([0, x], [0, y[i]], [0, z[i]],
mutation_scale=20, lw=1, arrowstyle="-|>", color="k")
ax.add_artist(a)
if hx:
for i, hx in enumerate(hx):
a = Arrow3D([0, hx], [0, hy[i]], [0, hz[i]],
mutation_scale=20, lw=1, arrowstyle="-|>", color="r")
ax.add_artist(a)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.title(title)
plt.draw()
plt.show()
def create_directory(path):
import os
if not os.path.exists(path):
os.makedirs(path)
print("Created directory: ", path)
| apache-2.0 |
achilleas-k/brian-scripts | my_hh.py | 1 | 2906 | #import matplotlib
#matplotlib.use('Agg')
from brian import *
from brian.library.ionic_currents import *
defaultclock.dt = dt = 0.1*ms
duration = 0.5*second
# Neuron parameters
Cm = 1*uF # /cm**2
gL = 0.1*msiemens
EL = -65*mV
ENa = 55*mV
EK = -90*mV
gNa = 35*msiemens
gK = 9*msiemens
threshold = EmpiricalThreshold(threshold=15*mV, refractory=2*ms)
# Input parameters
taue = 15*ms
taui = 5*ms
EExc = 0*mV
EInh = -80*mV
WExc = 80*nS
WInh = 50*nS
eqs='''
dv/dt=(-gNa*m**3*h*(v-ENa)-gK*n**4*(v-EK)-gL*(v-EL)-\
gExc*(v-EExc)-gInh*(v-EInh)+Iapp)/Cm : volt
m=alpham/(alpham+betam) : 1
alpham=-0.1/mV*(v+35*mV)/(exp(-0.1/mV*(v+35*mV))-1)/ms : Hz
betam=4*exp(-(v+60*mV)/(18*mV))/ms : Hz
dh/dt=5*(alphah*(1-h)-betah*h) : 1
alphah=0.07*exp(-(v+58*mV)/(20*mV))/ms : Hz
betah=1./(exp(-0.1/mV*(v+28*mV))+1)/ms : Hz
dn/dt=5*(alphan*(1-n)-betan*n) : 1
alphan=-0.01/mV*(v+34*mV)/(exp(-0.1/mV*(v+34*mV))-1)/ms : Hz
betan=0.125*exp(-(v+44*mV)/(80*mV))/ms : Hz
dgExc/dt = -gExc*(1./taue) : siemens
dgInh/dt = -gInh*(1./taui) : siemens
Iapp : amp
'''
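# The equations above follow a Wang-Buzsaki-style fast-spiking interneuron
# model: m is taken at its instantaneous steady state, the h and n gating
# kinetics are sped up by a factor of 5, and the synaptic conductances
# gExc/gInh decay exponentially with time constants taue and taui.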
neuron = NeuronGroup(2, eqs, threshold=threshold, method='RK')
# Inputs and connections
PExc_spikes = PoissonGroup(N=500, rates=8*Hz)
conn = Connection(PExc_spikes, neuron[0], 'gExc', weight=WExc)
PInh = PoissonInput(target=neuron[0], N=1000, rate=5*Hz,
weight=WInh, state='gInh')
#nsync = 200
#times = cumsum(rand(50))
#times = times/max(times)*(duration*0.95)
#input_times = [(i, t) for i in range(nsync) for t in times]
#inputs = SpikeGeneratorGroup(nsync, input_times)
#inpConn = Connection(inputs, neuron[1], state='gExc', weight=WExc)
pinput = PoissonInput(neuron[1], N=1, rate=10*Hz, weight=WExc, state='gExc',
copies=200, jitter=0.001)
# Init conditions
neuron.v = -65*mV
neuron.h = 1
# Monitors
inpmon = SpikeMonitor(PExc_spikes)
memmon = StateMonitor(neuron, 'v', record=True)
spikemon = SpikeMonitor(neuron)
excCondMon = StateMonitor(neuron, 'gExc', record=True)
inhCondMon = StateMonitor(neuron, 'gInh', record=True)
iappmon = StateMonitor(neuron, 'Iapp', record=True)
# Run
run(duration, report='text')
# Plotting
subplot(2,1,1)
raster_plot(inpmon)
title('Input spikes')
subplot(2,1,2)
plot(memmon.times/ms, memmon[0]/mV, color='black', label='V(t)')
title('Membrane')
xlabel("Time (ms)")
ylabel("Membrane potential (mV)")
#ax1.scatter(spikemon[0], ones(len(spikemon[0]))*25*mV, s=40, marker='*')
#ax1.legend(loc='upper left')
#ax2 = twinx()
#ax2.plot(excCondMon.times, excCondMon[0],
# linestyle='--', color='green', label='gExc')
#ax2.plot(inhCondMon.times, -1*inhCondMon[0],
# linestyle='--', color='red', label='gInh')
#ax2.legend(loc='upper right')
#print(spikemon[0])
#print('Firing rate: %f Hz' % (spikemon.nspikes/duration))
show()
#plot(memmon.times, memmon[0])
#plot(memmon.times, memmon[1])
#savefig('figure.png')
| apache-2.0 |
dingliumath/quant-econ | quantecon/tests/test_quad.py | 7 | 16564 | """
Filename: test_quad.py
Authors: Spencer Lyon
Date: 2014-07-02
Tests for quantecon.quad module
Notes
-----
Many of tests were derived from the file demqua## in the CompEcon
toolbox.
For all other tests, the MATLAB code is provided here in
a section of comments.
"""
from __future__ import division
import os
import unittest
from scipy.io import loadmat
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from quantecon.quad import *
from quantecon.tests.util import get_data_dir
### MATLAB code needed to generate data (in addition to a modified demqua03)
# % set random number seed so we get the same random nums as in python
# rng(42)
# % 1-d parameters -- just some random numbers
# a = -2.0
# b = 3.0
# n = 11
# % 3-d parameters -- just some random numbers
# a_3 = [-1.0 -2.0 1.0]
# b_3 = [1.0 12.0 1.5]
# n_3 = [7 5 9]
# mu_3d = [1.0 2.0 2.5]
# sigma2_3d = [1.0 0.1 0.0; 0.1 1.0 0.0; 0.0 0.0 1.2]
# % 1-d nodes and weights
# [x_cheb_1 w_cheb_1] = qnwcheb(n, a, b)
# [x_equiN_1 w_equiN_1] = qnwequi(n, a, b, 'N')
# [x_equiW_1 w_equiW_1] = qnwequi(n, a, b, 'W')
# [x_equiH_1 w_equiH_1] = qnwequi(n, a, b, 'H')
# rng(41); [x_equiR_1 w_equiR_1] = qnwequi(n, a, b, 'R')
# [x_lege_1 w_lege_1] = qnwlege(n, a, b)
# [x_norm_1 w_norm_1] = qnwnorm(n, a, b)
# [x_logn_1 w_logn_1] = qnwlogn(n, a, b)
# [x_simp_1 w_simp_1] = qnwsimp(n, a, b)
# [x_trap_1 w_trap_1] = qnwtrap(n, a, b)
# [x_unif_1 w_unif_1] = qnwunif(n, a, b)
# [x_beta_1 w_beta_1] = qnwbeta(n, b, b+1)
# [x_gamm_1 w_gamm_1] = qnwgamma(n, b)
# % 3-d nodes and weights
# [x_cheb_3 w_cheb_3] = qnwcheb(n_3, a_3, b_3)
# rng(42); [x_equiN_3 w_equiN_3] = qnwequi(n_3, a_3, b_3, 'N')
# [x_equiW_3 w_equiW_3] = qnwequi(n_3, a_3, b_3, 'W')
# [x_equiH_3 w_equiH_3] = qnwequi(n_3, a_3, b_3, 'H')
# [x_equiR_3 w_equiR_3] = qnwequi(n_3, a_3, b_3, 'R')
# [x_lege_3 w_lege_3] = qnwlege(n_3, a_3, b_3)
# [x_norm_3 w_norm_3] = qnwnorm(n_3, mu_3d, sigma2_3d)
# [x_logn_3 w_logn_3] = qnwlogn(n_3, mu_3d, sigma2_3d)
# [x_simp_3 w_simp_3] = qnwsimp(n_3, a_3, b_3)
# [x_trap_3 w_trap_3] = qnwtrap(n_3, a_3, b_3)
# [x_unif_3 w_unif_3] = qnwunif(n_3, a_3, b_3)
# [x_beta_3 w_beta_3] = qnwbeta(n_3, b_3, b_3+1.0)
# [x_gamm_3 w_gamm_3] = qnwgamma(n_3, b_3)
### End MATLAB commands
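# A minimal Python counterpart of the 1-d MATLAB calls above, using the same
# quantecon routines exercised by these tests; it is illustrative only and is
# never invoked by the test suite.
def _python_nodes_1d(n=11, a=-2.0, b=3.0):
    x_cheb, w_cheb = qnwcheb(n, a, b)    # Chebyshev nodes/weights
    x_lege, w_lege = qnwlege(n, a, b)    # Gauss-Legendre nodes/weights
    x_trap, w_trap = qnwtrap(n, a, b)    # trapezoid-rule nodes/weights
    return x_cheb, w_cheb, x_lege, w_lege, x_trap, w_trap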
data_dir = get_data_dir()
data = loadmat(os.path.join(data_dir, "matlab_quad.mat"), squeeze_me=True)
# Unpack parameters from MATLAB
a = data['a']
b = data['b']
n = data['n']
a_3 = data['a_3']
b_3 = data['b_3']
n_3 = data['n_3']
mu_3d = data['mu_3d']
sigma2_3d = data['sigma2_3d']
class TestQuadrect(unittest.TestCase):
@classmethod
def setUpClass(cls):
## Create Python Data for quadrect
# Create the python data -- similar to notebook code
kinds = ["trap", "simp", "lege", "N", "W", "H", "R"]
# Define some functions
f1 = lambda x: np.exp(-x)
f2 = lambda x: 1.0 / (1.0 + 25.0 * x**2.0)
f3 = lambda x: np.abs(x) ** 0.5
func_names = ["f1", "f2", "f3"]
# Integration parameters
n = np.array([5, 11, 21, 51, 101, 401]) # number of nodes
np.random.seed(42) # same seed as ML code.
a, b = -1, 1 # endpoints
# Set up pandas DataFrame to hold results
ind = pd.MultiIndex.from_product([func_names, n])
ind.names = ["Function", "Number of Nodes"]
cols = pd.Index(kinds, name="Kind")
quad_rect_res1d = pd.DataFrame(index=ind, columns=cols, dtype=float)
for i, func in enumerate([f1, f2, f3]):
func_name = func_names[i]
for kind in kinds:
for num in n:
num_in = num ** 2 if len(kind) == 1 else num
quad_rect_res1d.ix[func_name, num][kind] = quadrect(func,
num_in,
a, b,
kind)
cls.data1d = quad_rect_res1d
# Now 2d data
kinds2 = ["lege", "trap", "simp", "N", "W", "H", "R"]
f1_2 = lambda x: np.exp(x[:, 0] + x[:, 1])
f2_2 = lambda x: np.exp(-x[:, 0] * np.cos(x[:, 1]**2))
# Set up pandas DataFrame to hold results
a = ([0, 0], [-1, -1])
b = ([1, 2], [1, 1])
ind_2 = pd.Index(n**2, name="Num Points")
cols2 = pd.Index(kinds2, name="Kind")
data2 = pd.DataFrame(index=ind_2, columns=cols2, dtype=float)
for num in n:
for kind in kinds2[:4]:
data2.ix[num**2][kind] = quadrect(f1_2, [num, num],
a[0], b[0], kind)
for kind in kinds2[4:]:
data2.ix[num**2][kind] = quadrect(f1_2, num**2, a[0],
b[0], kind)
cls.data2d1 = data2
n3 = 10 ** (2 + np.array([1, 2, 3]))
ind_3 = pd.Index(n3, name="Num Points")
cols3 = pd.Index(kinds2[3:])
data3 = pd.DataFrame(index=ind_3, columns=cols3, dtype=float)
for num in n3:
for kind in kinds2[3:]:
data3.ix[num][kind] = quadrect(f2_2, num, a[1], b[1], kind)
cls.data2d2 = data3
## Organize MATLAB Data
ml_data = pd.DataFrame(index=ind, columns=cols, dtype=float)
ml_data.iloc[:6, :] = data['int_1d'][:, :, 0]
ml_data.iloc[6:12, :] = data['int_1d'][:, :, 1]
ml_data.iloc[12:18, :] = data['int_1d'][:, :, 2]
ml_data2 = pd.DataFrame(index=ind_2, columns=cols2, dtype=float)
ml_data2.iloc[:, :] = data['int_2d1']
ml_data3 = pd.DataFrame(index=ind_3, columns=cols3, dtype=float)
ml_data3.iloc[:, :] = data['int_2d2']
cls.ml_data1d = ml_data
cls.ml_data2d1 = ml_data2
cls.ml_data2d2 = ml_data3
def test_quadrect_1d_lege(self):
assert_allclose(self.data1d['lege'], self.ml_data1d['lege'])
def test_quadrect_1d_trap(self):
assert_allclose(self.data1d['trap'], self.ml_data1d['trap'])
def test_quadrect_1d_simp(self):
assert_allclose(self.data1d['simp'], self.ml_data1d['simp'])
def test_quadrect_1d_R(self):
assert_allclose(self.data1d['R'], self.ml_data1d['R'])
def test_quadrect_1d_W(self):
assert_allclose(self.data1d['W'], self.ml_data1d['W'])
def test_quadrect_1d_N(self):
assert_allclose(self.data1d['N'], self.ml_data1d['N'])
def test_quadrect_1d_H(self):
assert_allclose(self.data1d['H'], self.ml_data1d['H'])
def test_quadrect_2d_lege(self):
assert_allclose(self.data2d1['lege'], self.ml_data2d1['lege'])
def test_quadrect_2d_trap(self):
assert_allclose(self.data2d1['trap'], self.ml_data2d1['trap'])
def test_quadrect_2d_simp(self):
assert_allclose(self.data2d1['simp'], self.ml_data2d1['simp'])
# NOTE: The R tests will fail in more than 1 dimension. This is a
# function of MATLAB and numpy storing arrays in different
# "order". See comment in TestQnwequiR.setUpClass
# def test_quadrect_2d_R(self):
# assert_allclose(self.data2d1['R'], self.ml_data2d1['R'])
def test_quadrect_2d_W(self):
assert_allclose(self.data2d1['W'], self.ml_data2d1['W'])
def test_quadrect_2d_N(self):
assert_allclose(self.data2d1['N'], self.ml_data2d1['N'])
def test_quadrect_2d_H(self):
assert_allclose(self.data2d1['H'], self.ml_data2d1['H'])
def test_quadrect_2d_W2(self):
assert_allclose(self.data2d2['W'], self.ml_data2d2['W'])
def test_quadrect_2d_N2(self):
assert_allclose(self.data2d2['N'], self.ml_data2d2['N'])
def test_quadrect_2d_H2(self):
assert_allclose(self.data2d2['H'], self.ml_data2d2['H'])
class TestQnwcheb(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_cheb_1, cls.w_cheb_1 = qnwcheb(n, a, b)
cls.x_cheb_3, cls.w_cheb_3 = qnwcheb(n_3, a_3, b_3)
def test_qnwcheb_nodes_1d(self):
assert_allclose(self.x_cheb_1, data['x_cheb_1'])
def test_qnwcheb_nodes_3d(self):
assert_allclose(self.x_cheb_3, data['x_cheb_3'])
def test_qnwcheb_weights_1d(self):
assert_allclose(self.w_cheb_1, data['w_cheb_1'])
def test_qnwcheb_weights_3d(self):
assert_allclose(self.w_cheb_3, data['w_cheb_3'])
class TestQnwequiN(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_equiN_1, cls.w_equiN_1 = qnwequi(n, a, b, "N")
cls.x_equiN_3, cls.w_equiN_3 = qnwequi(n_3, a_3, b_3, "N")
def test_qnwequiN_nodes_1d(self):
assert_allclose(self.x_equiN_1, data['x_equiN_1'])
def test_qnwequiN_nodes_3d(self):
assert_allclose(self.x_equiN_3, data['x_equiN_3'])
def test_qnwequiN_weights_1d(self):
assert_allclose(self.w_equiN_1, data['w_equiN_1'])
def test_qnwequiN_weights_3d(self):
assert_allclose(self.w_equiN_3, data['w_equiN_3'])
class TestQnwequiW(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_equiW_1, cls.w_equiW_1 = qnwequi(n, a, b, "W")
cls.x_equiW_3, cls.w_equiW_3 = qnwequi(n_3, a_3, b_3, "W")
def test_qnwequiW_nodes_1d(self):
assert_allclose(self.x_equiW_1, data['x_equiW_1'])
def test_qnwequiW_nodes_3d(self):
assert_allclose(self.x_equiW_3, data['x_equiW_3'])
def test_qnwequiW_weights_1d(self):
assert_allclose(self.w_equiW_1, data['w_equiW_1'])
def test_qnwequiW_weights_3d(self):
assert_allclose(self.w_equiW_3, data['w_equiW_3'])
class TestQnwequiH(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_equiH_1, cls.w_equiH_1 = qnwequi(n, a, b, "H")
cls.x_equiH_3, cls.w_equiH_3 = qnwequi(n_3, a_3, b_3, "H")
def test_qnwequiH_nodes_1d(self):
assert_allclose(self.x_equiH_1, data['x_equiH_1'])
def test_qnwequiH_nodes_3d(self):
assert_allclose(self.x_equiH_3, data['x_equiH_3'])
def test_qnwequiH_weights_1d(self):
assert_allclose(self.w_equiH_1, data['w_equiH_1'])
def test_qnwequiH_weights_3d(self):
assert_allclose(self.w_equiH_3, data['w_equiH_3'])
class TestQnwequiR(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(41) # make sure to set seed here.
cls.x_equiR_1, cls.w_equiR_1 = qnwequi(n, a, b, "R")
np.random.seed(42) # make sure to set seed here.
temp, cls.w_equiR_3 = qnwequi(n_3, a_3, b_3, "R")
# NOTE: I need to do a little magic here. MATLAB and numpy
# are generating the same random numbers, but MATLAB is
# column major and numpy is row major, so they are stored
# in different places for multi-dimensional arrays.
# The ravel, reshape code here moves the numpy nodes into
# the same relative position as the MATLAB ones. Also, in
# order for this to work I have to undo the shifting of
# the nodes, re-organize data, then re-shift. If this
# seems like voodoo to you, it kinda is. But, the fact
# that the test can pass after this kind of manipulation
# is a strong indicator that we are doing it correctly
unshifted = (temp - a_3) / (b_3 - a_3)
reshaped = np.ravel(unshifted).reshape(315, 3, order='F')
reshifted = a_3 + reshaped * (b_3 - a_3)
cls.x_equiR_3 = reshifted
def test_qnwequiR_nodes_1d(self):
assert_allclose(self.x_equiR_1, data['x_equiR_1'])
def test_qnwequiR_nodes_3d(self):
assert_allclose(self.x_equiR_3, data['x_equiR_3'])
def test_qnwequiR_weights_1d(self):
assert_allclose(self.w_equiR_1, data['w_equiR_1'])
def test_qnwequiR_weights_3d(self):
assert_allclose(self.w_equiR_3, data['w_equiR_3'])
class TestQnwlege(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_lege_1, cls.w_lege_1 = qnwlege(n, a, b)
cls.x_lege_3, cls.w_lege_3 = qnwlege(n_3, a_3, b_3)
def test_qnwlege_nodes_1d(self):
assert_allclose(self.x_lege_1, data['x_lege_1'])
def test_qnwlege_nodes_3d(self):
assert_allclose(self.x_lege_3, data['x_lege_3'])
def test_qnwlege_weights_1d(self):
assert_allclose(self.w_lege_1, data['w_lege_1'])
def test_qnwlege_weights_3d(self):
assert_allclose(self.w_lege_3, data['w_lege_3'])
class TestQnwnorm(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_norm_1, cls.w_norm_1 = qnwnorm(n, a, b)
cls.x_norm_3, cls.w_norm_3 = qnwnorm(n_3, mu_3d, sigma2_3d)
def test_qnwnorm_nodes_1d(self):
assert_allclose(self.x_norm_1, data['x_norm_1'])
def test_qnwnorm_nodes_3d(self):
assert_allclose(self.x_norm_3, data['x_norm_3'])
def test_qnwnorm_weights_1d(self):
assert_allclose(self.w_norm_1, data['w_norm_1'])
def test_qnwnorm_weights_3d(self):
assert_allclose(self.w_norm_3, data['w_norm_3'])
class TestQnwlogn(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_logn_1, cls.w_logn_1 = qnwlogn(n, a, b)
cls.x_logn_3, cls.w_logn_3 = qnwlogn(n_3, mu_3d, sigma2_3d)
def test_qnwlogn_nodes_1d(self):
assert_allclose(self.x_logn_1, data['x_logn_1'])
def test_qnwlogn_nodes_3d(self):
assert_allclose(self.x_logn_3, data['x_logn_3'])
def test_qnwlogn_weights_1d(self):
assert_allclose(self.w_logn_1, data['w_logn_1'])
def test_qnwlogn_weights_3d(self):
assert_allclose(self.w_logn_3, data['w_logn_3'])
class TestQnwsimp(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_simp_1, cls.w_simp_1 = qnwsimp(n, a, b)
cls.x_simp_3, cls.w_simp_3 = qnwsimp(n_3, a_3, b_3)
def test_qnwsimp_nodes_1d(self):
assert_allclose(self.x_simp_1, data['x_simp_1'])
def test_qnwsimp_nodes_3d(self):
assert_allclose(self.x_simp_3, data['x_simp_3'])
def test_qnwsimp_weights_1d(self):
assert_allclose(self.w_simp_1, data['w_simp_1'])
def test_qnwsimp_weights_3d(self):
assert_allclose(self.w_simp_3, data['w_simp_3'])
class TestQnwtrap(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_trap_1, cls.w_trap_1 = qnwtrap(n, a, b)
cls.x_trap_3, cls.w_trap_3 = qnwtrap(n_3, a_3, b_3)
def test_qnwtrap_nodes_1d(self):
assert_allclose(self.x_trap_1, data['x_trap_1'])
def test_qnwtrap_nodes_3d(self):
assert_allclose(self.x_trap_3, data['x_trap_3'])
def test_qnwtrap_weights_1d(self):
assert_allclose(self.w_trap_1, data['w_trap_1'])
def test_qnwtrap_weights_3d(self):
assert_allclose(self.w_trap_3, data['w_trap_3'])
class TestQnwunif(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_unif_1, cls.w_unif_1 = qnwunif(n, a, b)
cls.x_unif_3, cls.w_unif_3 = qnwunif(n_3, a_3, b_3)
def test_qnwunif_nodes_1d(self):
assert_allclose(self.x_unif_1, data['x_unif_1'])
def test_qnwunif_nodes_3d(self):
assert_allclose(self.x_unif_3, data['x_unif_3'])
def test_qnwunif_weights_1d(self):
assert_allclose(self.w_unif_1, data['w_unif_1'])
def test_qnwunif_weights_3d(self):
assert_allclose(self.w_unif_3, data['w_unif_3'])
class TestQnwbeta(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_beta_1, cls.w_beta_1 = qnwbeta(n, b, b + 1.0)
cls.x_beta_3, cls.w_beta_3 = qnwbeta(n_3, b_3, b_3 + 1.0)
def test_qnwbeta_nodes_1d(self):
assert_allclose(self.x_beta_1, data['x_beta_1'])
def test_qnwbeta_nodes_3d(self):
assert_allclose(self.x_beta_3, data['x_beta_3'])
def test_qnwbeta_weights_1d(self):
assert_allclose(self.w_beta_1, data['w_beta_1'])
def test_qnwbeta_weights_3d(self):
assert_allclose(self.w_beta_3, data['w_beta_3'])
class TestQnwgamm(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_gamm_1, cls.w_gamm_1 = qnwgamma(n, b)
cls.x_gamm_3, cls.w_gamm_3 = qnwgamma(n_3, b_3)
def test_qnwgamm_nodes_1d(self):
assert_allclose(self.x_gamm_1, data['x_gamm_1'])
def test_qnwgamm_nodes_3d(self):
assert_allclose(self.x_gamm_3, data['x_gamm_3'])
def test_qnwgamm_weights_1d(self):
assert_allclose(self.w_gamm_1, data['w_gamm_1'])
def test_qnwgamm_weights_3d(self):
assert_allclose(self.w_gamm_3, data['w_gamm_3'])
| bsd-3-clause |
liuzz1983/open_vision | tools/validate_on_lfw.py | 1 | 5761 | """Validate a face recognizer on the "Labeled Faces in the Wild" dataset (http://vis-www.cs.umass.edu/lfw/).
Embeddings are calculated using the pairs from http://vis-www.cs.umass.edu/lfw/pairs.txt and the ROC curve
is calculated and plotted. Both the model metagraph and the model parameters need to exist
in the same directory, and the metagraph should have the extension '.meta'.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import argparse
import tensorflow as tf
import numpy as np
from sklearn import metrics
from scipy.optimize import brentq
from scipy import interpolate
from openvision.datasets import lfw
from openvision.utils import tf_util
from openvision.facenet import evaluate
from openvision.facenet import facenet
def main(args):
with tf.Graph().as_default():
with tf.Session() as sess:
# Read the file containing the pairs used for testing
#pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
pairs = lfw.gen_pairs(args.lfw_dir, pair_num=args.lfw_pair_num)
print(pairs)
# Get the paths for the corresponding images
paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
# Load the model
print('Model directory: %s' % args.model_dir)
tf_util.load_model(sess, args.model_dir)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
image_size = images_placeholder.get_shape()[1]
embedding_size = embeddings.get_shape()[1]
# Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
batch_size = args.lfw_batch_size
nrof_images = len(paths)
nrof_batches = int(math.ceil(1.0*nrof_images / batch_size))
emb_array = np.zeros((nrof_images, embedding_size))
for i in range(nrof_batches):
start_index = i*batch_size
end_index = min((i+1)*batch_size, nrof_images)
paths_batch = paths[start_index:end_index]
images = facenet.load_data(paths_batch, False, False, image_size)
feed_dict = { images_placeholder:images, phase_train_placeholder:False }
emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)
print("begin to evaluate:", len(emb_array), " is_same:", len(actual_issame))
tpr, fpr, accuracy, val, val_std, far = evaluate.evaluate(emb_array,
actual_issame, nrof_folds=args.lfw_nrof_folds)
print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
auc = metrics.auc(fpr, tpr)
print('Area Under Curve (AUC): %1.3f' % auc)
eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
print('Equal Error Rate (EER): %1.3f' % eer)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('lfw_dir', type=str,
help='Path to the data directory containing aligned LFW face patches.')
parser.add_argument('--lfw_batch_size', type=int,
help='Number of images to process in a batch in the LFW test set.', default=100)
parser.add_argument('model_dir', type=str,
help='Directory containing the metagraph (.meta) file and the checkpoint (ckpt) file containing model parameters')
parser.add_argument('--lfw_pair', type=str,
help='The file containing the pairs to use for validation.', default='data/pairs.txt')
parser.add_argument('--lfw_pair_num', type=int,
        help='Number of LFW pairs to generate for validation.', default=200)
parser.add_argument('--lfw_file_ext', type=str,
help='The file extension for the LFW dataset.', default='jpg', choices=['jpg', 'png'])
parser.add_argument('--lfw_nrof_folds', type=int,
help='Number of folds to use for cross validation. Mainly used for testing.', default=10)
return parser.parse_args(argv)
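# Example invocation (a sketch only -- the dataset and model paths are
# placeholders, not files shipped with this repository):
#   python validate_on_lfw.py ~/datasets/lfw_aligned ~/models/facenet \
#       --lfw_batch_size 100 --lfw_pair_num 200 --lfw_nrof_folds 10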
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
main(args)
| mit |
MechCoder/scikit-learn | examples/covariance/plot_outlier_detection.py | 36 | 5023 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates four
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the data
  are Gaussian distributed and performs better than the One-Class SVM in
  that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests
  and hence better adapted to high-dimensional settings, even though it
  also performs quite well in the low-dimensional examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points' colors,
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
| bsd-3-clause |
ehudmagal/robotqcapp | Utils/RobotQAUtils/graphics/multipleYsTest.py | 2 | 4245 | import matplotlib.pyplot as plt
import sys
import os
lib_path = os.path.abspath(r'E:\Tamuz\Utils\RobotQAUtils')
sys.path.append(lib_path)
from Utils.RobotQAUtils.plateReader import *
from Utils.RobotQAUtils.classes import *
width = 0.35
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.itervalues():
sp.set_visible(False)
def printMultipleYs(ds = None):
fig = plt.figure()
fig.subplots_adjust(right=0.75)
host = fig.add_subplot(111)
par1 = host.twinx()
par2 = host.twinx()
par3 = host.twinx()
par4 = host.twinx()
par5 = host.twinx()
par6 = host.twinx()
par7 = host.twinx()
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
par1.spines["right"].set_position(("axes", float(1.0/7.0)))
par2.spines["right"].set_position(("axes",float(2.0/7.0)))
par3.spines["right"].set_position(("axes", float(3.0/7.0)))
par4.spines["right"].set_position(("axes", float(4.0/7.0)))
par5.spines["right"].set_position(("axes", float(5.0/7.0)))
par6.spines["right"].set_position(("axes", float(6.0/7.0)))
par7.spines["right"].set_position(("axes", float(7.0/7.0)))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(par1)
make_patch_spines_invisible(par2)
make_patch_spines_invisible(par3)
make_patch_spines_invisible(par4)
make_patch_spines_invisible(par5)
make_patch_spines_invisible(par6)
make_patch_spines_invisible(par7)
# Second, show the right spine.
par1.spines["right"].set_visible(True)
par2.spines["right"].set_visible(True)
par3.spines["right"].set_visible(True)
par4.spines["right"].set_visible(True)
par5.spines["right"].set_visible(True)
par6.spines["right"].set_visible(True)
par7.spines["right"].set_visible(True)
for idx,d in enumerate(ds):
manualDeviationPercent,robotdeviationPercents= d.getManualAndRobotDeviationPercent()
manualDeviationPercent*=10#enlarging the scale of the robot
manualMean = d.getManualMean()*100
robotMean = d.getRobotMean()*100
manualUniqueWellsPercent = d.getUniqueWellsCompareToOutsideMeanPercent(outsideMean = d.getManualMean(),rob=False)
robotUniqueWellsPercent = d.getUniqueWellsCompareToOutsideMeanPercent(outsideMean = d.getRobotMean(),rob=True)
if not idx%7:
color = 'r-'
elif not idx%6:
color = 'b-'
elif not idx%5:
color ='g-'
elif not idx%4:
color = 'y-'
elif not idx%3:
color = 'y-'
elif not idx%2:
color = 'b-'
else:
color = 'g-'
host.plot([0, 1, 2,3,4,5,6,7], [idx,d.getManualColorVolume(), manualDeviationPercent, robotdeviationPercents,manualMean,robotMean,manualUniqueWellsPercent,robotUniqueWellsPercent],color)
host.set_xlim(0, 7)
host.set_ylim(0, 100)
par1.set_ylim(0, 100)
par2.set_ylim([0, 10])
par3.set_ylim(0, 100)
par4.set_ylim([0, 1])
par5.set_ylim([0,1])
par6.set_ylim([0, 1])
par7.set_ylim(0, 100)
host.set_xlabel("a 4 axis graph")
host.set_ylabel("expiriment index")
par1.set_ylabel("volume")
par2.set_ylabel("manual deviation percent")
par3.set_ylabel("robot deviation percent")
par4.set_ylabel("manual mean")
par5.set_ylabel("robot mean")
par6.set_ylabel("manual unique wells percent")
par7.set_ylabel("robot unique wells percent")
# host.yaxis.label.set_color(p1.get_color())
#par1.yaxis.label.set_color(p2.get_color())
#par2.yaxis.label.set_color(p3.get_color())
tkw = dict(size=5, width=1.5)
host.tick_params(axis='y', **tkw)
par1.tick_params(axis='y', **tkw)
par2.tick_params(axis='y', **tkw)
par3.tick_params(axis='y', **tkw)
host.tick_params(axis='x', **tkw)
#lines = [p1, p2, p3]
#host.legend(lines, [l.get_label() for l in lines])
plt.show()
| bsd-3-clause |
carrillo/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: its calibration curve has a sigmoid shape, which is typical of
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
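# The calibration wrapper used throughout this example, reduced to its core
# (a sketch; `base_clf` is a placeholder for any scikit-learn classifier):
#   calibrated = CalibratedClassifierCV(base_clf, cv=2, method='isotonic')
#   calibrated.fit(X_train, y_train)
#   prob_pos = calibrated.predict_proba(X_test)[:, 1]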
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
tmhm/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respects the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
OceansAus/cosima-cookbook | cosima_cookbook/plots/overturning.py | 1 | 2545 | import cosima_cookbook as cc
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm_notebook
import IPython.display
def psi_avg(expts, n=10, clev=np.arange(-20,20,2)):
if not isinstance(expts, list):
expts = [expts]
# computing
results = []
for expt in tqdm_notebook(expts, leave=False, desc='experiments'):
psi_avg = cc.diagnostics.psi_avg(expt, n)
result = {'psi_avg': psi_avg,
'expt': expt}
results.append(result)
IPython.display.clear_output()
# plotting
for result in results:
psi_avg = result['psi_avg']
expt = result['expt']
plt.figure(figsize=(10, 5))
plt.contourf(psi_avg.grid_yu_ocean,
psi_avg.potrho, psi_avg,
cmap=plt.cm.PiYG,levels=clev,extend='both')
cb=plt.colorbar(orientation='vertical', shrink = 0.7)
cb.ax.set_xlabel('Sv')
plt.contour(psi_avg.grid_yu_ocean, psi_avg.potrho, psi_avg, levels=clev, colors='k', linewidths=0.25)
plt.contour(psi_avg.grid_yu_ocean, psi_avg.potrho, psi_avg, levels=[0.0,], colors='k', linewidths=0.5)
plt.gca().invert_yaxis()
plt.ylim((1037.5,1034))
plt.ylabel('Potential Density (kg m$^{-3}$)')
plt.xlabel('Latitude ($^\circ$N)')
plt.xlim([-75,85])
plt.title('Overturning in %s' % expt)
def zonal_mean(expts,variable,n=10,resolution=1):
if not isinstance(expts, list):
expts = [expts]
# computing
results = []
for expt in tqdm_notebook(expts, leave=False, desc='experiments'):
zonal_mean, zonal_diff = cc.diagnostics.zonal_mean(expt,variable,n,resolution)
result = {'zonal_mean': zonal_mean,
'zonal_diff': zonal_diff,
'expt': expt}
results.append(result)
IPython.display.clear_output()
# plotting
for result in results:
zonal_mean = result['zonal_mean']
zonal_diff = result['zonal_diff']
expt = result['expt']
plt.figure(figsize=(12,5))
plt.subplot(121)
zonal_mean.plot()
plt.title(expt)
plt.gca().invert_yaxis()
plt.title('{}: Zonal Mean {}'.format(expt, variable))
plt.subplot(122)
zonal_diff.plot()
plt.title(expt)
plt.gca().invert_yaxis()
plt.title('{}: Zonal Mean {} Change'.format(expt, variable))
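# Hedged usage sketch (the experiment name and variable below are
# placeholders; they assume data that cosima_cookbook can resolve):
#   from cosima_cookbook.plots import overturning
#   overturning.psi_avg('my_experiment', n=10)
#   overturning.zonal_mean('my_experiment', 'temp', n=10, resolution=1)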
| apache-2.0 |
phdowling/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
MartinSavc/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would cause a division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
jenfly/atmos-tools | testing/testing-data-gradient.py | 1 | 2370 | import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import xray
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
datadir = atm.homedir() + 'datastore/merra/daily/'
year = 2014
subset = '_40E-120E_90S-90N'
def get_var(datadir, varnm, subset, year):
filenm = '%smerra_%s%s_%d.nc' % (datadir, varnm, subset, year)
with xray.open_dataset(filenm) as ds:
var = ds[varnm].load()
return var
uq_int = get_var(datadir, 'UFLXQV', subset, year)
vq_int = get_var(datadir, 'VFLXQV', subset, year)
mfc = atm.moisture_flux_conv(uq_int, vq_int, already_int=True)
mfcbar = mfc.mean(dim='YDim').mean(dim='XDim')
# Test atm.gradient
a = atm.constants.radius_earth.values
latdim, londim = 1, 2
lat = atm.get_coord(uq_int, 'lat')
latrad = np.radians(lat)
latrad[abs(lat) > 89] = np.nan
coslat = xray.DataArray(np.cos(latrad), coords={'YDim' : lat})
lon = atm.get_coord(uq_int, 'lon')
lonrad = np.radians(lon)
mfc_x = atm.gradient(uq_int, lonrad, londim) / (a*coslat)
mfc_y = atm.gradient(vq_int * coslat, latrad, latdim) / (a*coslat)
mfc_test = mfc_x + mfc_y
mfc_test = - atm.precip_convert(mfc_test, 'kg/m2/s', 'mm/day')
mfc_test_bar = mfc_test.mean(dim='YDim').mean(dim='XDim')
diff = mfc_test - mfc
print(diff.max())
print(diff.min())
plt.plot(mfcbar)
plt.plot(mfc_test_bar)
print(mfc_test_bar - mfcbar)
# ----------------------------------------------------------------------
# Vertical gradient du/dp
lon1, lon2 = 40, 120
pmin, pmax = 100, 300
subset_dict = {'XDim' : (lon1, lon2), 'Height' : (pmin, pmax)}
urls = merra.merra_urls([year])
month, day = 7, 15
url = urls['%d%02d%02d' % (year, month, day)]
with xray.open_dataset(url) as ds:
u = atm.subset(ds['U'], subset_dict, copy=False)
u = u.mean(dim='TIME')
pres = u['Height']
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dp = np.gradient(pres)
# Calc 1
dims = u.shape
dudp = np.nan * u
for i in range(dims[1]):
for j in range(dims[2]):
dudp.values[:, i, j] = np.gradient(u[:, i, j], dp)
# Test atm.gradient
dudp_test = atm.gradient(u, pres, axis=0)
diff = dudp_test - dudp
print(diff.max())
print(diff.min())
| mit |
widdowquinn/pyani | tests/test_dependencies.py | 1 | 2627 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) The James Hutton Institute 2016-2019
# (c) University of Strathclyde 2019-2020
# Author: Leighton Pritchard
#
# Contact:
# [email protected]
#
# Leighton Pritchard,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# 161 Cathedral Street,
# Glasgow,
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2016-2019 The James Hutton Institute
# Copyright (c) 2019-2020 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Test for availability of pyani dependencies.
We only test for dependencies from non-standard libraries.
These tests are intended to be run from the repository root using:
pytest -v
"""
import subprocess
import sys
import unittest
import pytest
from pyani import pyani_config
def test_import_biopython():
"""Test Biopython import."""
import Bio
def test_import_matplotlib():
"""Test matplotlib import."""
import matplotlib
def test_import_numpy():
"""Test numpy import."""
import numpy
def test_import_pandas():
"""Test pandas import."""
import pandas
def test_import_scipy():
"""Test scipy import."""
import scipy
def test_blastn_available(blastn_available):
"""Test that BLAST+ is available."""
assert blastn_available
@pytest.mark.xfail(reason="Optional third-party executable (blastall)")
def test_run_blastall(blastall_available):
"""Test that blastall is available."""
assert blastall_available
def test_run_nucmer(nucmer_available):
"""Test that nucmer is available."""
assert nucmer_available
| mit |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/scipy/spatial/_plotutils.py | 11 | 4843 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.gca()
was_held = ax.ishold()
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
ptp_bound = points.ptp(axis=0)
ax.set_xlim(points[:,0].min() - 0.1*ptp_bound[0],
points[:,0].max() + 0.1*ptp_bound[0])
ax.set_ylim(points[:,1].min() - 0.1*ptp_bound[1],
points[:,1].max() + 0.1*ptp_bound[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
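    Examples
    --------
    A minimal usage sketch added for illustration (the random point set and
    variable names are placeholders, not part of the original module):
    >>> import numpy as np
    >>> from scipy.spatial import Delaunay, delaunay_plot_2d
    >>> points = np.random.rand(10, 2)
    >>> tri = Delaunay(points)
    >>> fig = delaunay_plot_2d(tri)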
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
ax.plot(tri.points[:,0], tri.points[:,1], 'o')
ax.triplot(tri.points[:,0], tri.points[:,1], tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
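    Examples
    --------
    A minimal usage sketch added for illustration (point set and names are
    placeholders):
    >>> import numpy as np
    >>> from scipy.spatial import ConvexHull, convex_hull_plot_2d
    >>> points = np.random.rand(30, 2)
    >>> hull = ConvexHull(points)
    >>> fig = convex_hull_plot_2d(hull)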
"""
from matplotlib.collections import LineCollection
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
line_segments = []
for simplex in hull.simplices:
line_segments.append([(x, y) for x, y in hull.points[simplex]])
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='solid'))
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
show_vertices : bool, optional
Add the Voronoi vertices to the plot.
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
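    Examples
    --------
    A minimal usage sketch added for illustration (point set and names are
    placeholders):
    >>> import numpy as np
    >>> from scipy.spatial import Voronoi, voronoi_plot_2d
    >>> points = np.random.rand(10, 2)
    >>> vor = Voronoi(points)
    >>> fig = voronoi_plot_2d(vor)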
"""
from matplotlib.collections import LineCollection
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
ax.plot(vor.points[:,0], vor.points[:,1], '.')
if kw.get('show_vertices', True):
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
line_segments = []
for simplex in vor.ridge_vertices:
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
line_segments.append([(x, y) for x, y in vor.vertices[simplex]])
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='solid'))
ptp_bound = vor.points.ptp(axis=0)
line_segments = []
center = vor.points.mean(axis=0)
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.any(simplex < 0):
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[i] + direction * ptp_bound.max()
line_segments.append([(vor.vertices[i, 0], vor.vertices[i, 1]),
(far_point[0], far_point[1])])
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='dashed'))
_adjust_bounds(ax, vor.points)
return ax.figure
| apache-2.0 |
yugangzhang/chx_backups | time_correlation_code.py | 1 | 11814 | import numpy as np
import sys
import time
import skxray.core.roi as roi
from matplotlib import gridspec
import itertools
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
mcolors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k','darkgoldenrod','oldlace', 'brown','dodgerblue' ])
markers = itertools.cycle(list(plt.Line2D.filled_markers))
lstyles = itertools.cycle(['-', '--', '-.','.',':'])
#Dec 1, NSLS-II, yugangzhang, [email protected]
def autocor_one_time( num_buf, ring_mask, imgs, num_lev=None, start_img=None, end_img=None, bad_images = None, threshold=None):
start_time = time.time()
#print (dly)
if start_img is None:
start_img=0
if end_img is None:
try:
end_img= len(imgs)
except:
end_img= imgs.length
#print (start_img, end_img)
noframes = end_img - start_img #+ 1
#print (noframes)
if num_lev is None:num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
nolev = num_lev
nobuf =num_buf
print ( 'The lev number is %s'%num_lev)
dly, dict_dly = delays( num_lev, num_buf, time=1 )
#print (dly.max())
lev_leng = np.array( [ len( dict_dly[i] ) for i in list(dict_dly.keys()) ])
qind, pixelist = roi.extract_label_indices( ring_mask )
noqs = np.max(qind)
nopr = np.bincount(qind, minlength=(noqs+1))[1:]
nopixels = nopr.sum()
start_time = time.time()
buf = np.ma.zeros([num_lev,num_buf,nopixels])
buf.mask = True
cts=np.zeros(num_lev)
cur=np.ones(num_lev) * num_buf
countl = np.array( np.zeros( num_lev ),dtype='int')
g2 = np.zeros( [ noframes, noframes, noqs] )
G=np.zeros( [(nolev+1)*int(nobuf/2),noqs])
IAP=np.zeros( [(nolev+1)*int(nobuf/2),noqs])
IAF=np.zeros( [(nolev+1)*int(nobuf/2),noqs])
num= np.array( np.zeros( num_lev ),dtype='int')
Num= { key: [0]* len( dict_dly[key] ) for key in list(dict_dly.keys()) }
    print ('Doing g2 calculation of %s frames---'%(noframes ))
ttx=0
#if bad_images is None:bad_images=[]
for n in range( start_img, end_img ): ##do the work here
img = imgs[n]
img_ = (np.ravel(img))[pixelist]
#print ( img_.max() )
if threshold is not None:
if img_.max() >= threshold:
print ('bad image: %s here!'%n)
img_ = np.ma.zeros( len(img_) )
img_.mask = True
if bad_images is not None:
if n in bad_images:
print ('bad image: %s here!'%n)
img_ = np.ma.zeros( len(img_) )
img_.mask = True
cur[0]=1+cur[0]%num_buf # increment buffer
buf[0, cur[0]-1 ]= img_
img=[] #//save space
img_=[]
countl[0] = 1+ countl[0]
process_one_time(lev=0, bufno=cur[0]-1,
G=G,IAP=IAP,IAF=IAF, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly, Num=Num, lev_leng=lev_leng )
#time_ind[0].append( current_img_time )
processing=1
lev=1
while processing:
if cts[lev]:
prev= 1+ (cur[lev-1]-1-1+num_buf)%num_buf
cur[lev]= 1+ cur[lev]%num_buf
countl[lev] = 1+ countl[lev]
bufa = buf[lev-1,prev-1]
bufb= buf[lev-1,cur[lev-1]-1]
if (bufa.data==0).all():
buf[lev,cur[lev]-1] = bufa
elif (bufb.data==0).all():
buf[lev,cur[lev]-1] = bufb
else:
buf[lev,cur[lev]-1] = ( bufa + bufb ) /2.
cts[lev]=0
t1_idx= (countl[lev]-1) *2
process_one_time(lev=lev, bufno=cur[lev]-1,
G=G,IAP=IAP,IAF=IAF, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly,Num =Num, lev_leng=lev_leng )
lev+=1
#//Since this level finished, test if there is a next level for processing
if lev<num_lev:processing = 1
else:processing = 0
else:
cts[lev]=1 #// set flag to process next time
processing=0 #// can stop until more images are accumulated
if n %(noframes/10) ==0:
sys.stdout.write("#")
sys.stdout.flush()
#print G.shape
if (len(np.where(IAP==0)[0])!=0) and ( 0 not in nopr):
gmax = np.where(IAP==0)[0][0]
else:
gmax=IAP.shape[0]
#g2=G/(IAP*IAF)
#print G
g2=(G[:gmax]/(IAP[:gmax]*IAF[:gmax]))
elapsed_time = time.time() - start_time
#print (Num)
print ('Total time: %.2f min' %(elapsed_time/60.))
return g2,dly[:gmax] #, elapsed_time/60.
def process_one_time(lev, bufno,
G,IAP,IAF, buf, num, num_buf,noqs,qind,nopr, dly,Num,lev_leng ):
num[lev]+=1
if lev==0:imin=0
else:imin= int(num_buf/2 )
for i in range(imin, min(num[lev],num_buf) ):
ptr=lev*int(num_buf/2)+i
delayno=int( (bufno-i)%num_buf) #//cyclic buffers
IP = buf[lev,delayno]
IF = buf[lev,bufno]
ind = ptr - lev_leng[:lev].sum()
IP_ = IP.copy()
IF_ = IF.copy()
if (IP.data ==0).all():
IF_=np.zeros( IP.shape )
IP_= np.zeros( IP.shape )
Num[lev+1][ind] += 1
if (IF.data ==0).all():
#print ('here IF =0')
IF_ = np.zeros( IF.shape )
IP_= np.zeros( IF.shape )
if (IP.data ==0).all():
pass
else:
Num[lev+1][ind] += 1
norm_num = num[lev]-i - Num[lev+1][ind]
#print ( lev, ptr, num[lev]-i, Num[lev+1][ind] )
#print (ind, lev_leng)
if not (IP_ ==0).all():
G[ptr]+= ( np.bincount(qind, weights= IF_*IP_ )[1:]/nopr- G[ptr] )/ norm_num
IAP[ptr]+= ( np.bincount(qind, weights= IP_)[1:]/nopr-IAP[ptr] )/ norm_num
IAF[ptr]+= ( np.bincount(qind, weights= IF_)[1:]/nopr-IAF[ptr] )/ norm_num
def autocor_two_time( num_buf, ring_mask, imgs, num_lev=None, start_img=None, end_img=None ):
#print (dly)
if start_img is None:start_img=0
if end_img is None:
try:
end_img= len(imgs)
except:
end_img= imgs.length
#print (start_img, end_img)
noframes = end_img - start_img #+ 1
#print (noframes)
if num_lev is None:num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
print ( 'The lev number is %s'%num_lev)
dly, dict_dly = delays( num_lev, num_buf, time=1 )
#print (dly.max())
qind, pixelist = roi.extract_label_indices( ring_mask )
noqs = np.max(qind)
nopr = np.bincount(qind, minlength=(noqs+1))[1:]
nopixels = nopr.sum()
start_time = time.time()
buf=np.zeros([num_lev,num_buf,nopixels]) #// matrix of buffers, for store img
cts=np.zeros(num_lev)
cur=np.ones(num_lev) * num_buf
countl = np.array( np.zeros( num_lev ),dtype='int')
g12 = np.zeros( [ noframes, noframes, noqs] )
num= np.array( np.zeros( num_lev ),dtype='int')
time_ind ={key: [] for key in range(num_lev)}
ttx=0
for n in range( start_img, end_img ): ##do the work here
cur[0]=1+cur[0]%num_buf # increment buffer
img = imgs[n]
#print ( 'The insert image is %s' %(n) )
buf[0, cur[0]-1 ]= (np.ravel(img))[pixelist]
img=[] #//save space
countl[0] = 1+ countl[0]
current_img_time = n - start_img +1
process_two_time(lev=0, bufno=cur[0]-1,n=current_img_time,
g12=g12, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly)
time_ind[0].append( current_img_time )
processing=1
lev=1
while processing:
if cts[lev]:
prev= 1+ (cur[lev-1]-1-1+num_buf)%num_buf
cur[lev]= 1+ cur[lev]%num_buf
countl[lev] = 1+ countl[lev]
buf[lev,cur[lev]-1] = ( buf[lev-1,prev-1] + buf[lev-1,cur[lev-1]-1] ) /2.
cts[lev]=0
t1_idx= (countl[lev]-1) *2
current_img_time = ((time_ind[lev-1])[t1_idx ] + (time_ind[lev-1])[t1_idx +1 ] )/2.
time_ind[lev].append( current_img_time )
process_two_time(lev=lev, bufno=cur[lev]-1,n=current_img_time,
g12=g12, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly)
lev+=1
#//Since this level finished, test if there is a next level for processing
if lev<num_lev:processing = 1
else:processing = 0
else:
cts[lev]=1 #// set flag to process next time
processing=0 #// can stop until more images are accumulated
if n %(noframes/10) ==0:
sys.stdout.write("#")
sys.stdout.flush()
    #mirror the lower triangle of each q-plane onto the upper one so that
    #g12 is symmetric in (t1, t2)
    for q in range(noqs):
        x0 = g12[:,:,q]
        g12[:,:,q] = np.tril(x0) + np.tril(x0).T - np.diag( np.diag(x0) )
elapsed_time = time.time() - start_time
print ('Total time: %.2f min' %(elapsed_time/60.))
return g12, elapsed_time/60.
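#A minimal usage sketch (not from the original file): the names `ring_mask`
#and `imgs` are placeholders -- `ring_mask` is assumed to be a labeled ROI
#array (q-ring labels, as consumed by roi.extract_label_indices) and `imgs`
#an indexable series of 2-D detector frames.
#
#    g12, minutes = autocor_two_time(num_buf=8, ring_mask=ring_mask, imgs=imgs)
#    #g12 has shape (noframes, noframes, noqs): the two-time correlation
#    #function for every q-ring, symmetrized about the t1=t2 diagonal.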
def process_two_time(lev, bufno,n ,
g12, buf, num, num_buf,noqs,qind,nopr, dly ):
num[lev]+=1
if lev==0:imin=0
else:imin= int(num_buf/2 )
for i in range(imin, min(num[lev],num_buf) ):
ptr=lev*int(num_buf/2)+i
delayno=(bufno-i)%num_buf #//cyclic buffers
IP=buf[lev,delayno]
IF=buf[lev,bufno]
I_t12 = (np.histogram(qind, bins=noqs, weights= IF*IP))[0]
I_t1 = (np.histogram(qind, bins=noqs, weights= IP))[0]
I_t2 = (np.histogram(qind, bins=noqs, weights= IF))[0]
tind1 = (n-1)
tind2=(n -dly[ptr] -1)
if not isinstance( n, int ):
nshift = 2**(lev-1)
for i in range( -nshift+1, nshift +1 ):
#print tind1+i
g12[ int(tind1 + i), int(tind2 + i) ] =I_t12/( I_t1 * I_t2) * nopr
else:
#print tind1
g12[ tind1, tind2 ] = I_t12/( I_t1 * I_t2) * nopr
def delays( num_lev=3, num_buf=4, time=1 ):
''' DOCUMENT delays(time=)
return array of delays.
KEYWORD: time: scale delays by time ( should be time between frames)
'''
    if num_buf%2!=0:print ("num_buf must be even!!!" )
dly=np.zeros( (num_lev+1)*int(num_buf/2) +1 )
dict_dly ={}
for i in range( 1,num_lev+1):
if i==1:imin= 1
else:imin= int(num_buf/2)+1
ptr=(i-1)*int(num_buf/2)+ np.arange(imin,num_buf+1)
dly[ptr]= np.arange( imin, num_buf+1) *2**(i-1)
dict_dly[i] = dly[ptr-1]
dly*=time
#print (i, ptr, imin)
return dly, dict_dly
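#A quick worked example of the multi-tau lag structure produced by delays();
#the values below were worked out by hand from the code above and are only a
#sanity-check sketch, not program output:
#
#    dly, dict_dly = delays(num_lev=3, num_buf=4, time=1)
#    #dly -> [0, 1, 2, 3, 4, 6, 8, 12, 16]
#    #i.e. unit-spaced lags on the first level, then lags spaced by 2**(lev-1)
#    #on every coarser level.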
| bsd-3-clause |
ChanChiChoi/scikit-learn | examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py | 252 | 3490 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
correspond to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
mfjb/scikit-learn | sklearn/utils/multiclass.py | 83 | 12343 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
      (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
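# A minimal sketch (not part of this module) of how an estimator's
# ``partial_fit`` would typically use the helper above; ``MyClassifier`` and
# ``_allocate_state`` are placeholder names:
#
#     class MyClassifier(object):
#         def partial_fit(self, X, y, classes=None):
#             if _check_partial_fit_first_call(self, classes):
#                 # first call: ``self.classes_`` was just set, so allocate
#                 # any per-class state here
#                 self._allocate_state(len(self.classes_))
#             # ...incremental update using X and y...
#             return self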
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
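# A small worked example for ``class_distribution`` (dense input, no sample
# weights); the numbers were worked out by hand and are a sketch only:
#
#     y = np.array([[1, 0],
#                   [2, 0],
#                   [1, 3]])
#     classes, n_classes, class_prior = class_distribution(y)
#     # classes     -> [array([1, 2]), array([0, 3])]
#     # n_classes   -> [2, 2]
#     # class_prior -> [array([0.667, 0.333]), array([0.667, 0.333])]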
| bsd-3-clause |
nhejazi/scikit-learn | sklearn/gaussian_process/tests/test_gpc.py | 31 | 5994 | """Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils.testing import (assert_true, assert_greater,
assert_almost_equal, assert_array_equal)
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
def test_predict_consistent():
# Check binary predict decision has also predicted probability above 0.5.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_lml_improving():
# Test that hyperparameter-tuning improves log-marginal likelihood.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
# Test that lml of optimized kernel is stored correctly.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(), 7)
def test_converged_to_local_maximum():
# Test that we are in local maximum after hyperparameter-optimization.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])))
def test_lml_gradient():
# Compare analytic and numeric gradient of log marginal likelihood.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpc.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1e-3] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features)
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_custom_optimizer():
# Test that GPC can use externally defined optimizers.
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Checks that optimizer improved marginal likelihood
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_multi_class():
# Test GPC for multi-class classification problems.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
def test_multi_class_n_jobs():
# Test that multi-class GPC produces identical results with n_jobs>1.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
| bsd-3-clause |
geo-fluid-dynamics/phaseflow-fenics | docs/conf.py | 1 | 5674 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
# -- Mock out libraries with C dependencies ----------------------------------
import sys
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ["fenics", "numpy", "matplotlib"]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- Project information -----------------------------------------------------
project = 'Phaseflow'
copyright = '2018, Alexander G. Zimmerman'
author = 'Alexander G. Zimmerman'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = 'Alpha'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Phaseflowdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Phaseflow.tex', 'Phaseflow Documentation',
'Alexander G. Zimmerman', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'phaseflow', 'Phaseflow Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Phaseflow', 'Phaseflow Documentation',
author, 'Phaseflow', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mit |
petosegan/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
It runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
treycausey/scikit-learn | examples/grid_search_digits.py | 8 | 2665 | """
=====================================================================
Parameter estimation using grid search with a nested cross-validation
=====================================================================
This example shows how a classifier is optimized by "nested"
cross-validation, which is done using the
:class:`sklearn.grid_search.GridSearchCV` object on a development set
that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5, scoring=score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_estimator_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() / 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
samuelstjean/dipy | doc/examples/snr_in_cc.py | 1 | 6475 | """
=============================================
SNR estimation for Diffusion-Weighted Images
=============================================
Computing the Signal-to-Noise-Ratio (SNR) of DW images is still an open question,
as SNR depends on the white matter structure of interest as well as
the gradient direction corresponding to each DWI.
In classical MRI, SNR can be defined as the ratio of the mean
of the signal divided by the standard deviation of the
underlying Gaussian noise, that is SNR = mean(signal) / std(noise).
The noise standard deviation can be
computed from the background in any of the DW images. How do we compute
the mean of the signal, and what signal?
The strategy here is to compute a 'worst-case' SNR for DWI. Several white matter
structures such as the corpus callosum (CC), corticospinal tract (CST), or
the superior longitudinal fasciculus (SLF) can be easily identified from
the colored-FA (cfa) map. In this example, we will use voxels from the CC,
which have the characteristic of being highly RED in the cfa map since they are mainly oriented in
the left-right direction. We know that the DW image
closest to the x-direction will be the one with the most attenuated diffusion signal.
This is the strategy adopted in several recent papers (see [1]_ and [2]_). It gives a good
indication of the quality of the DWI data.
First, we compute the tensor model in a brain mask (see the DTI example for more explanation).
"""
from __future__ import division, print_function
import nibabel as nib
import numpy as np
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
from dipy.segment.mask import median_otsu
from dipy.reconst.dti import TensorModel
fetch_stanford_hardi()
img, gtab = read_stanford_hardi()
data = img.get_data()
affine = img.get_affine()
print('Computing brain mask...')
b0_mask, mask = median_otsu(data)
print('Computing tensors...')
tenmodel = TensorModel(gtab)
tensorfit = tenmodel.fit(data, mask=mask)
"""Next, we set our red-blue-green thresholds to (0.6, 1) in the x axis
and (0, 0.1) in the y and z axes respectively.
These values work well in practice to isolate the very RED voxels of the cfa map.
Then, as assurance, we want just RED voxels in the CC (there could be
noisy red voxels around the brain mask and we don't want those). Unless the brain
acquisition was badly aligned, the CC is always close to the mid-sagittal slice.
The following lines perform these two operations and then saves the computed mask.
"""
print('Computing worst-case/best-case SNR using the corpus callosum...')
from dipy.segment.mask import segment_from_cfa
from dipy.segment.mask import bounding_box
threshold = (0.6, 1, 0, 0.1, 0, 0.1)
CC_box = np.zeros_like(data[..., 0])
mins, maxs = bounding_box(mask)
mins = np.array(mins)
maxs = np.array(maxs)
diff = (maxs - mins) // 4
bounds_min = mins + diff
bounds_max = maxs - diff
CC_box[bounds_min[0]:bounds_max[0],
bounds_min[1]:bounds_max[1],
bounds_min[2]:bounds_max[2]] = 1
mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box,
threshold, return_cfa=True)
cfa_img = nib.Nifti1Image((cfa*255).astype(np.uint8), affine)
mask_cc_part_img = nib.Nifti1Image(mask_cc_part.astype(np.uint8), affine)
nib.save(mask_cc_part_img, 'mask_CC_part.nii.gz')
import matplotlib.pyplot as plt
region = 40
fig = plt.figure('Corpus callosum segmentation')
plt.subplot(1, 2, 1)
plt.title("Corpus callosum (CC)")
plt.axis('off')
red = cfa[..., 0]
plt.imshow(np.rot90(red[region, ...]))
plt.subplot(1, 2, 2)
plt.title("CC mask used for SNR computation")
plt.axis('off')
plt.imshow(np.rot90(mask_cc_part[region, ...]))
fig.savefig("CC_segmentation.png", bbox_inches='tight')
"""
.. figure:: CC_segmentation.png
:align: center
"""
"""Now that we are happy with our crude CC mask that selected voxels in the x-direction,
we can use all the voxels to estimate the mean signal in this region.
"""
mean_signal = np.mean(data[mask_cc_part], axis=0)
"""Now, we need a good background estimation. We will re-use the brain mask
computed before and invert it to catch the outside of the brain. This could
also be determined manually with a ROI in the background.
[Warning: Certain MR manufacturers mask out the outside of the brain with 0's.
One thus has to be careful how the noise ROI is defined].
"""
from scipy.ndimage.morphology import binary_dilation
mask_noise = binary_dilation(mask, iterations=10)
mask_noise[..., :mask_noise.shape[-1]//2] = 1
mask_noise = ~mask_noise
mask_noise_img = nib.Nifti1Image(mask_noise.astype(np.uint8), affine)
nib.save(mask_noise_img, 'mask_noise.nii.gz')
noise_std = np.std(data[mask_noise, :])
"""We can now compute the SNR for each DWI. For example, report SNR
for the DW images whose gradient directions lie closest to
the X, Y and Z axes.
"""
# Exclude null bvecs from the search
idx = np.sum(gtab.bvecs, axis=-1) == 0
gtab.bvecs[idx] = np.inf
axis_X = np.argmin(np.sum((gtab.bvecs-np.array([1, 0, 0]))**2, axis=-1))
axis_Y = np.argmin(np.sum((gtab.bvecs-np.array([0, 1, 0]))**2, axis=-1))
axis_Z = np.argmin(np.sum((gtab.bvecs-np.array([0, 0, 1]))**2, axis=-1))
for direction in [0, axis_X, axis_Y, axis_Z]:
SNR = mean_signal[direction]/noise_std
if direction == 0 :
print("SNR for the b=0 image is :", SNR)
else :
print("SNR for direction", direction, " ", gtab.bvecs[direction], "is :", SNR)
"""SNR for the b=0 image is : ''42.0695455758''"""
"""SNR for direction 58 [ 0.98875 0.1177 -0.09229] is : ''5.46995373635''"""
"""SNR for direction 57 [-0.05039 0.99871 0.0054406] is : ''23.9329492871''"""
"""SNR for direction 126 [-0.11825 -0.039925 0.99218 ] is : ''23.9965694823''"""
"""
Since the CC is aligned with the X axis, the lowest SNR is for that gradient
direction. In comparison, the DW images in
the perpendicular Y and Z axes have a high SNR. The b0 still exhibits the highest SNR,
since there is no signal attenuation.
Hence, we can say the Stanford diffusion
data has a 'worst-case' SNR of approximately 5, a
'best-case' SNR of approximately 24, and a SNR of 42 on the b0 image.
"""
"""
References:
.. [1] Descoteaux, M., Deriche, R., Le Bihan, D., Mangin, J.-F., and Poupon, C.
Multiple q-shell diffusion propagator imaging.
Medical image analysis, 15(4), 603, 2011.
.. [2] Jones, D. K., Knosche, T. R., & Turner, R.
White Matter Integrity, Fiber Count, and Other Fallacies: The Dos and Don'ts of Diffusion MRI.
NeuroImage, 73, 239, 2013.
"""
| bsd-3-clause |
iABC2XYZ/abc | DM_RFGAP/Test1.py | 1 | 1355 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 26 08:29:25 2017
@author: A
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from BetaGammaC import *
from GenPartilces import *
from EmitNG import *
from Statistics import *
from BasicInput import *
WeightsAlphaT=tf.Variable(tf.random_uniform(shape=[3],minval=-1.,maxval=1.))
WeightsBetaT=tf.Variable(tf.random_uniform(shape=[3],minval=0.1,maxval=4.))
WeightsPotential=tf.Variable(tf.random_uniform(shape=[numCav],minval=30.,maxval=200.))
WeightsPhis=tf.Variable(tf.random_uniform(shape=[numCav],minval=-np.pi/2.,maxval=np.pi/2.))
#################################################################
constEmitG=EmitN2G(constEnergyInMeV,constEmitN)
x,xp,y,yp,z,zp=Gen6DPart5Twiss(constEmitG,WeightsAlphaT,WeightsBetaT,numPart)
emitT,alphaT,betaT,gammaT=GetTwiss6D(x,xp,y,yp,z,zp)
init=tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
print sess.run(constEmitG)
print sess.run(emitT)
print "_________________"
print(sess.run(WeightsAlphaT))
print(sess.run(alphaT))
print "_________________"
print(sess.run(WeightsBetaT))
print(sess.run(betaT))
print "_________________"
print sess.run(constEmitN)
print sess.run(EmitG2N(constEnergyInMeV,emitT))
print('OK')
| gpl-3.0 |
meteorcloudy/tensorflow | tensorflow/examples/learn/hdf5_classification.py | 75 | 2899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we are saving and loading the iris data in h5 format as a simple
# demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
low-sky/cohrscld | cloudplots.py | 1 | 1779 | import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from astropy.table import Table
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = ['Times New Roman']
mpl.rcParams['font.size'] = 14
mpl.rc('xtick', labelsize=14)
mpl.rc('ytick', labelsize=14)
t = Table.read('cohrs_sfecat2.fits')
mlum = t['mlum_msun']
R0 = 8.5e3
rgal = (R0**2+t['distance']**2-2*R0*t['distance']*np.cos(t['x_coor']*np.pi/180))**0.5
bins = 5
edges = np.linspace(0, 100, bins+1)
plt.clf()
fig = plt.figure(figsize=(5, 4))
ax = fig.add_subplot(111, aspect='equal')
for pmin, pmax in zip(edges[0:-1], edges[1:]):
rmin = np.percentile(rgal, pmin)
rmax = np.percentile(rgal, pmax)
idx = (rgal >= rmin) * (rgal < rmax)
m = np.sort(mlum[idx])
n = np.sum(idx)
plt.loglog(m, np.linspace(n, 1, n),
label='{0:2.1f} kpc - {1:2.1f} kpc'.format(rmin / 1e3,
rmax / 1e3),
drawstyle='steps')
plt.xlim([1e1, 2e6])
plt.xlabel(r'Mass ($M_\odot$)')
plt.ylabel(r'$N(>M)$')
plt.legend(loc='lower left', fontsize=12)
plt.grid()
plt.tight_layout()
plt.savefig('mass_spec.png',dpi=300)
edges = np.array([15,25,35,45,55])
plt.clf()
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111,aspect='equal')
for lmin,lmax in zip(edges[0:-1],edges[1:]):
idx = (t['x_coor']>=lmin)*(t['x_coor']<lmax)
m = np.sort(mlum[idx])
n = np.sum(idx)
plt.loglog(m,np.linspace(n,1,n),
label=r'{0:2.0f}$^\circ$ < $\ell$ < {1:2.0f}$^\circ$'.format(lmin,lmax),
drawstyle='steps')
plt.xlim([1e1,1e7])
plt.xlabel(r'Mass ($M_\odot$)',size=16)
plt.ylabel(r'$N(>M)$',size=16)
plt.legend()
plt.tight_layout()
plt.savefig('mass_angle_spec.png', dpi=300)
| gpl-3.0 |
Erechtheus/geolocation | TrainIndividualModelsCNN.py | 1 | 20093 | #Some code to train CNN's instead of LSTMs
#Performance slightly lower than LSTM
#Load stuff:
from sklearn.utils import shuffle
import os
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import pickle
import numpy as np
import time
from keras.layers import Dropout, Dense, BatchNormalization, SpatialDropout1D, Conv1D, GlobalMaxPooling1D
from keras.layers.embeddings import Embedding
import math
import datetime
from keras import Input
from keras import Model
import json
#Load configuration from file
if os.path.isfile('config.json'):
print("Loading configutation from configuration file {config.json}")
with open('config.json') as json_file:
config = json.load(json_file)
binaryPath = config['binaryPath']
modelPath = config['modelPath']
else:
print("Configuration file {config.json} not found")
binaryPath = 'data/binaries/' # Place where the serialized training data is
modelPath = 'data/models/' # Place to store the models
#Load preprocessed data...
file = open(binaryPath +"processors.obj",'rb')
descriptionTokenizer, domainEncoder, tldEncoder, locationTokenizer, sourceEncoder, textTokenizer, nameTokenizer, timeZoneTokenizer, utcEncoder, langEncoder, placeMedian, colnames, classEncoder = pickle.load(file)
file = open(binaryPath +"data.obj",'rb')
trainDescription, trainLocation, trainDomain, trainTld, trainSource, trainTexts, trainUserName, trainTZ, trainUtc, trainUserLang, trainCreatedAt, classes= pickle.load(file)
#Shuffle train-data
trainDescription, trainLocation, trainDomain, trainTld, trainSource, trainTexts, trainUserName, trainTZ, trainUtc, trainUserLang, trainCreatedAt, classes = shuffle(trainDescription, trainLocation, trainDomain, trainTld, trainSource, trainTexts, trainUserName, trainTZ, trainUtc, trainUserLang, trainCreatedAt, classes, random_state=1202)
##################Train
# create the model
batch_size = 256
nb_epoch = 5
verbosity=2
filters = 450
kernel_size = 3
descriptionEmbeddings = 100
locEmbeddings = 50
textEmbeddings = 100
nameEmbeddings = 100
tzEmbeddings = 50
validation_split = 0.01 #91279 samples for validation
callbacks = [
# EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=2, verbose=1, restore_best_weights=True),
# ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_delta=1e-4, patience=2, cooldown=1, verbose=1),
## ModelCheckpoint(filepath='twitter.h5', monitor='loss', verbose=0, save_best_only=True),
]
####################
#1.) Description Model
descriptionBranchI = Input(shape=(None,), name="inputDescription")
descriptionBranch = Embedding(descriptionTokenizer.num_words,
descriptionEmbeddings,
#input_length=MAX_DESC_SEQUENCE_LENGTH,
#mask_zero=True
)(descriptionBranchI)
descriptionBranch = SpatialDropout1D(rate=0.2)(descriptionBranch) #Masks the same embedding element for all tokens
descriptionBranch = BatchNormalization()(descriptionBranch)
descriptionBranch = Dropout(0.2)(descriptionBranch)
descriptionBranch = Conv1D(filters,kernel_size, padding='valid',activation='relu', strides=1)(descriptionBranch)
descriptionBranch = GlobalMaxPooling1D()(descriptionBranch)
descriptionBranch = BatchNormalization()(descriptionBranch)
descriptionBranch = Dropout(0.2, name="description")(descriptionBranch)
descriptionBranchO = Dense(len(set(classes)), activation='softmax')(descriptionBranch)
descriptionModel = Model(inputs=descriptionBranchI, outputs=descriptionBranchO)
descriptionModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
descriptionHistory = descriptionModel.fit(trainDescription, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("descriptionBranch finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
descriptionModel.save(modelPath +'descriptionBranchNorm.h5')
#####################
#2a.) Link Model for Domain
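#The block below turns the integer domain ids into a boolean one-hot
#(indicator) matrix; the same pattern is repeated further down for the TLD,
#source, UTC-offset and user-language features.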
categorial = np.zeros((len(trainDomain), len(domainEncoder.classes_)), dtype="bool")
for i in range(len(trainDomain)):
categorial[i, trainDomain[i]] = True
trainDomain = categorial
domainBranchI = Input(shape=(trainDomain.shape[1],), name="inputDomain")
domainBranch = Dense(int(math.log2(trainDomain.shape[1])), input_shape=(trainDomain.shape[1],), activation='relu')(domainBranchI)
domainBranch = BatchNormalization()(domainBranch)
domainBranch = Dropout(0.2, name="domainName")(domainBranch)
domainBranchO = Dense(len(set(classes)), activation='softmax')(domainBranch)
domainModel = Model(inputs=domainBranchI, outputs=domainBranchO)
domainModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
sourceHistory = domainModel.fit(trainDomain, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("tldBranch finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
domainModel.save(modelPath + 'domainBranch.h5')
#2b.) Link Model for TLD
categorial = np.zeros((len(trainTld), len(tldEncoder.classes_)), dtype="bool")
for i in range(len(trainTld)):
categorial[i, trainTld[i]] = True
trainTld = categorial
tldBranchI = Input(shape=(trainTld.shape[1],), name="inputTld")
tldBranch = Dense(int(math.log2(trainTld.shape[1])), input_shape=(trainTld.shape[1],), activation='relu')(tldBranchI)
tldBranch = BatchNormalization()(tldBranch)
tldBranch = Dropout(0.2, name="tld")(tldBranch)
tldBranchO = Dense(len(set(classes)), activation='softmax')(tldBranch)
tldBranchModel = Model(inputs=tldBranchI, outputs=tldBranchO)
tldBranchModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
sourceHistory = tldBranchModel.fit(trainTld, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("tldBranch finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
tldBranchModel.save(modelPath + 'tldBranch.h5')
#2c.)Merged Model
linkBranchI= Input(shape=((trainDomain.shape[1] + trainTld.shape[1]),), name="inputLink")
linkBranch = Dense(int(math.log2(trainDomain.shape[1] + trainTld.shape[1])), input_shape=((trainDomain.shape[1] + trainTld.shape[1]),), activation='relu')(linkBranchI)
linkBranch = BatchNormalization()(linkBranch)
linkBranch = Dropout(0.2, name="linkModel")(linkBranch)
linkBranchO = Dense(len(set(classes)), activation='softmax')(linkBranch)
linkModel = Model(inputs=linkBranchI, outputs=linkBranchO)
linkModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
sourceHistory = linkModel.fit(np.concatenate((trainDomain, trainTld), axis=1), classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("linkModel finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
linkModel.save(modelPath + 'linkModel.h5')
#####################
#3.) location Model
locationBranchI = Input(shape=(None,), name="inputLocation")
locationBranch = Embedding(locationTokenizer.num_words,
locEmbeddings,
#input_length=MAX_LOC_SEQUENCE_LENGTH,
#mask_zero=True
)(locationBranchI)
locationBranch = SpatialDropout1D(rate=0.2)(locationBranch)#Masks the same embedding element for all tokens
locationBranch = BatchNormalization()(locationBranch)
locationBranch = Dropout(0.2)(locationBranch)
locationBranch = Conv1D(filters,kernel_size, padding='valid',activation='relu', strides=1)(locationBranch)
locationBranch = GlobalMaxPooling1D()(locationBranch)
locationBranch = BatchNormalization()(locationBranch)
locationBranch = Dropout(0.2, name="location")(locationBranch)
locationBranchO = Dense(len(set(classes)), activation='softmax')(locationBranch)
locationModel = Model(inputs=locationBranchI, outputs=locationBranchO)
locationModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
locationHistory = locationModel.fit(trainLocation, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("locationHistory finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
locationModel.save(modelPath +'locationBranchNorm.h5')
#####################
#4.) Source Mode
categorial = np.zeros((len(trainSource), len(sourceEncoder.classes_)), dtype="bool")
for i in range(len(trainSource)):
categorial[i, trainSource[i]] = True
trainSource = categorial
sourceBranchI = Input(shape=(trainSource.shape[1],), name="inputSource")
sourceBranch = Dense(int(math.log2(trainSource.shape[1])), input_shape=(trainSource.shape[1],), activation='relu')(sourceBranchI)
sourceBranch = BatchNormalization()(sourceBranch)
sourceBranch = Dropout(0.2, name="source")(sourceBranch)
sourceBranchO = Dense(len(set(classes)), activation='softmax')(sourceBranch)
sourceModel = Model(inputs=sourceBranchI, outputs=sourceBranchO)
sourceModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
sourceHistory = sourceModel.fit(trainSource, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("sourceBranch finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
sourceModel.save(modelPath +'sourceBranch.h5')
#####################
#5.) Text Model
textBranchI = Input(shape=(None,), name="inputText")
textBranch = Embedding(textTokenizer.num_words,
textEmbeddings,
#input_length=MAX_TEXT_SEQUENCE_LENGTH,
#mask_zero=True
)(textBranchI)
textBranch = SpatialDropout1D(rate=0.2)(textBranch) #Masks the same embedding element for all tokens
textBranch = BatchNormalization()(textBranch)
textBranch = Dropout(0.2)(textBranch)
textBranch = Conv1D(filters,kernel_size, padding='valid',activation='relu', strides=1)(textBranch)
textBranch = GlobalMaxPooling1D()(textBranch)
textBranch = BatchNormalization()(textBranch)
textBranch = Dropout(0.2, name="text")(textBranch)
textBranchO = Dense(len(set(classes)), activation='softmax')(textBranch)
textModel = Model(inputs=textBranchI, outputs=textBranchO)
textModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
textHistory = textModel.fit(trainTexts, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("textBranch finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
textModel.save(modelPath +'textBranchNorm.h5')
#####################
# 6.) Name Model
nameBranchI = Input(shape=(None,), name="inputName")
nameBranch = Embedding(nameTokenizer.num_words,
nameEmbeddings,
#input_length=MAX_NAME_SEQUENCE_LENGTH,
#mask_zero=True
)(nameBranchI)
nameBranch = SpatialDropout1D(rate=0.2)(nameBranch) #Masks the same embedding element for all tokens
nameBranch = BatchNormalization()(nameBranch)
nameBranch = Dropout(0.2)(nameBranch)
nameBranch = Conv1D(filters,kernel_size, padding='valid',activation='relu', strides=1)(nameBranch)
nameBranch = GlobalMaxPooling1D()(nameBranch)
nameBranch = BatchNormalization()(nameBranch)
nameBranch = Dropout(0.2, name="username")(nameBranch)
nameBranchO = Dense(len(set(classes)), activation='softmax')(nameBranch)
nameModel = Model(inputs=nameBranchI, outputs=nameBranchO)
nameModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
nameHistory = nameModel.fit(trainUserName, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("nameBranch finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
nameModel.save(modelPath +'nameBranchNorm.h5')
#####################
# 7.) TimeZone Model
tzBranchI = Input(shape=(None,), name="inputTimeZone")
tzBranch = Embedding(timeZoneTokenizer.num_words,
tzEmbeddings,
#input_length=MAX_TZ_SEQUENCE_LENGTH,
#mask_zero=True
)(tzBranchI)
tzBranch = SpatialDropout1D(rate=0.2)(tzBranch) #Masks the same embedding element for all tokens
tzBranch = BatchNormalization()(tzBranch)
tzBranch = Dropout(0.2)(tzBranch)
tzBranch = Conv1D(filters,kernel_size, padding='valid',activation='relu', strides=1)(tzBranch)
tzBranch = GlobalMaxPooling1D()(tzBranch)
tzBranch = BatchNormalization()(tzBranch)
tzBranch = Dropout(0.2, name="timezone")(tzBranch)
tzBranchO = Dense(len(set(classes)), activation='softmax')(tzBranch)
tzBranchModel = Model(inputs=tzBranchI, outputs=tzBranchO)
tzBranchModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
tzHistory = tzBranchModel.fit(trainTZ, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("tzBranch finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
tzBranchModel.save(modelPath +'tzBranchNorm.h5')
#####################
# 8.) UTC Model
categorial = np.zeros((len(trainUtc), len(utcEncoder.classes_)), dtype="bool")
for i in range(len(trainUtc)):
categorial[i, trainUtc[i]] = True
trainUtc = categorial
utcBranchI = Input(shape=(trainUtc.shape[1],), name="inputUTC")
utcBranch = Dense(int(math.log2(trainUtc.shape[1])), activation='relu')(utcBranchI)
utcBranch = BatchNormalization()(utcBranch)
utcBranch = Dropout(0.2, name="utc")(utcBranch)
utcBranchO = Dense(len(set(classes)), activation='softmax')(utcBranch)
utcBranchModel = Model(inputs=utcBranchI, outputs=utcBranchO)
utcBranchModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
utcHistory = utcBranchModel.fit(trainUtc, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("utcBranch finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
utcBranchModel.save(modelPath +'utcBranch.h5')
#9) "User Language
categorial = np.zeros((len(trainUserLang), len(langEncoder.classes_)), dtype="bool")
for i in range(len(trainUserLang)):
categorial[i, trainUserLang[i]] = True
trainUserLang = categorial
userLangBranchI = Input(shape=(trainUserLang.shape[1],), name="inputUserLang")
userLangBranch = Dense(int(math.log2(trainUserLang.shape[1])),input_shape=(trainUserLang.shape[1],), activation='relu')(userLangBranchI)
userLangBranch = BatchNormalization()(userLangBranch)
userLangBranch = Dropout(0.2, name="userLang")(userLangBranch)
userLangBranchO = Dense(len(set(classes)), activation='softmax')(userLangBranch)
userLangModel = Model(inputs=userLangBranchI, outputs=userLangBranchO)
userLangModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
userLangHistory = userLangModel.fit(trainUserLang, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("userLangBranch finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
userLangModel.save(modelPath +'userLangBranch.h5')
#10a Tweet-time (as number)
tweetTimeBranchI = Input(shape=(trainCreatedAt.shape[1],), name="inputTweetTime")
tweetTimeBranch = Dense(2, name="tweetTime")(tweetTimeBranchI)  # simple (near) pass-through layer, kept mainly so this branch can be plugged into the merged model
tweetTimeBranchO = Dense(len(set(classes)), activation='softmax')(tweetTimeBranch)
tweetTimeModel = Model(inputs=tweetTimeBranchI, outputs=tweetTimeBranchO)
tweetTimeModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
timeHistory = tweetTimeModel.fit(trainCreatedAt, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("tweetTimeModel finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
tweetTimeModel.save(modelPath + 'tweetTimeBranch.h5')
#10) Tweet-Time (120-way categorical encoding instead of a raw number)
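# Alternative encoding kept for reference but left disabled (wrapped in the
# triple-quoted string below): the tweet time is one-hot encoded into discrete
# bins and the same small dense classifier is trained on that representation.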
"""
categorial = np.zeros((len(trainCreatedAt), len(timeEncoder.classes_)), dtype="bool")
for i in range(len(trainCreatedAt)):
categorial[i, trainCreatedAt[i]] = True
trainCreatedAt = categorial
tweetTimeBranchI = Input(shape=(trainCreatedAt.shape[1],), name="inputTweetTime")
tweetTimeBranch = Dense(int(math.log2(trainCreatedAt.shape[1])), input_shape=(trainCreatedAt.shape[1],), activation='relu')(tweetTimeBranchI)
tweetTimeBranch = BatchNormalization()(tweetTimeBranch)
tweetTimeBranch = Dropout(0.2, name="tweetTime")(tweetTimeBranch)
tweetTimeBranchO = Dense(len(set(classes)), activation='softmax')(tweetTimeBranch)
tweetTimeModel = Model(inputs=tweetTimeBranchI, outputs=tweetTimeBranchO)
tweetTimeModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
timeHistory = tweetTimeModel.fit(trainCreatedAt, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("tweetTimeModel finished after " +str(datetime.timedelta(seconds=round(time.time() - start))))
tweetTimeModel.save(modelPath + 'tweetTimeBranch.h5')
"""
#11) Merged categorical model
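# Concatenate the encoded domain, TLD, source and user-language features along
# the feature axis (axis=1) and train a single dense classifier on the combined
# categorical input.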
trainData = np.concatenate((trainDomain, trainTld, trainSource, trainUserLang), axis=1)
categorialBranchI = Input(shape=(trainData.shape[1],), name="inputCategorial")
categorialBranch = Dense(int(math.log2(trainData.shape[1])), input_shape=(trainData.shape[1],), activation='relu')(categorialBranchI)
categorialBranch = BatchNormalization()(categorialBranch)
categorialBranch = Dropout(0.2, name="categorialModel")(categorialBranch)
categorialBranchO = Dense(len(set(classes)), activation='softmax')(categorialBranch)
categorialModel = Model(inputs=categorialBranchI, outputs=categorialBranchO)
categorialModel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start = time.time()
categorialModelHistory = categorialModel.fit(trainData, classes,
epochs=nb_epoch, batch_size=batch_size,
verbose=verbosity, validation_split=validation_split,callbacks=callbacks
)
print("categorialModel finished after " +str(datetime.timedelta(seconds=time.time() - start)))
categorialModel.save(modelPath + 'categorialModel.h5') | gpl-3.0 |
nhuntwalker/astroML | book_figures/appendix/fig_sdss_filters.py | 3 | 2175 | r"""
SDSS Filters
------------
Figure C.1
The five SDSS filters, showing the total transmission taking into account
atmospheric transmission and instrumental effects such as CCD efficiency.
Shown for reference is the spectrum (:math:`F_\lambda`) of a star
similar to Vega (alpha-Lyr), which for many years was used as a reference
flux for magnitude calibration.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
from astroML.datasets import fetch_sdss_filter, fetch_vega_spectrum
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set up figure and axes
fig = plt.figure(figsize=(5, 3.75))
ax = fig.add_subplot(111)
#----------------------------------------------------------------------
# Fetch and plot the Vega spectrum
spec = fetch_vega_spectrum()
lam = spec[0]
spectrum = spec[1] / 2.1 / spec[1].max()
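# Normalize the Vega-like reference spectrum for display so it peaks below the
# filter transmission curves (the 2.1 factor appears to be an ad-hoc plotting
# scale rather than a physical constant).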
ax.plot(lam, spectrum, '-k')
#------------------------------------------------------------
# Fetch and plot the five filters
text_kwargs = dict(ha='center', va='center', alpha=0.5, fontsize=14)
for f, c, loc in zip('ugriz', 'bgrmk', [3500, 4600, 6100, 7500, 8800]):
data = fetch_sdss_filter(f)
ax.fill(data[0], data[1], ec=c, fc=c, alpha=0.4)
ax.text(loc, 0.02, f, color=c, **text_kwargs)
ax.set_xlim(3000, 11000)
ax.set_title('SDSS Filters and Reference Spectrum')
ax.set_xlabel('Wavelength (Angstroms)')
ax.set_ylabel('normalized flux / filter transmission')
plt.show()
| bsd-2-clause |
winklerand/pandas | pandas/core/series.py | 1 | 107124 | """
Data structure for 1-dimensional cross-sectional and time series data
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import types
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_bool,
is_integer, is_integer_dtype,
is_float_dtype,
is_extension_type,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_list_like,
is_hashable,
is_iterator,
is_dict_like,
is_scalar,
_is_unorderable_exception,
_ensure_platform_int,
pandas_dtype)
from pandas.core.dtypes.generic import (
ABCSparseArray, ABCDataFrame, ABCIndexClass)
from pandas.core.dtypes.cast import (
maybe_upcast, infer_dtype_from_scalar,
maybe_convert_platform,
maybe_cast_to_datetime, maybe_castable,
construct_1d_arraylike_from_scalar)
from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike
from pandas.core.common import (is_bool_indexer,
_default_index,
_asarray_tuplesafe,
_values_from_object,
_maybe_match_name,
SettingWithCopyError,
_maybe_box_datetimelike,
standardize_mapping,
_any_none)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
Float64Index, _ensure_index)
from pandas.core.indexing import check_bool_indexer, maybe_convert_indices
from pandas.core import generic, base
from pandas.core.internals import SingleBlockManager
from pandas.core.categorical import Categorical, CategoricalAccessor
import pandas.core.strings as strings
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexes.period import PeriodIndex
from pandas import compat
from pandas.io.formats.terminal import get_terminal_size
from pandas.compat import (
zip, u, OrderedDict, StringIO, range, get_range_parameters)
from pandas.compat.numpy import function as nv
from pandas.core import accessor
import pandas.core.ops as ops
import pandas.core.algorithms as algorithms
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.io.formats.format as fmt
from pandas.util._decorators import (
Appender, deprecate, deprecate_kwarg, Substitution)
from pandas.util._validators import validate_bool_kwarg
from pandas._libs import index as libindex, tslib as libts, lib, iNaT
from pandas.core.config import get_option
import pandas.plotting._core as gfx
__all__ = ['Series']
_shared_doc_kwargs = dict(
axes='index', klass='Series', axes_single_arg="{0, 'index'}",
inplace="""inplace : boolean, default False
If True, performs operation inplace and returns None.""",
unique='np.ndarray', duplicated='Series',
optional_by='', optional_mapper='', optional_labels='', optional_axis='',
versionadded_to_excel='\n .. versionadded:: 0.20.0\n')
# see gh-16971
def remove_na(arr):
"""
DEPRECATED : this function will be removed in a future version.
"""
warnings.warn("remove_na is deprecated and is a private "
"function. Do not use.", FutureWarning, stacklevel=2)
return remove_na_arraylike(arr)
def _coerce_method(converter):
""" install the scalar coercion methods """
def wrapper(self):
if len(self) == 1:
return converter(self.iloc[0])
raise TypeError("cannot convert the series to "
"{0}".format(str(converter)))
return wrapper
# ----------------------------------------------------------------------
# Series class
class Series(base.IndexOpsMixin, generic.NDFrame):
"""
One-dimensional ndarray with axis labels (including time series).
Labels need not be unique but must be a hashable type. The object
supports both integer- and label-based indexing and provides a host of
methods for performing operations involving the index. Statistical
methods from ndarray have been overridden to automatically exclude
missing data (currently represented as NaN).
Operations between Series (+, -, /, *, **) align values based on their
associated index values-- they need not be the same length. The result
index will be the sorted union of the two indexes.
Parameters
----------
data : array-like, dict, or scalar value
Contains data stored in Series
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex(len(data)) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
"""
_metadata = ['name']
_accessors = frozenset(['dt', 'cat', 'str'])
_deprecations = generic.NDFrame._deprecations | frozenset(
['sortlevel', 'reshape', 'get_value', 'set_value', 'from_csv'])
_allow_index_ops = True
def __init__(self, data=None, index=None, dtype=None, name=None,
copy=False, fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
if index is None:
index = data.index
else:
if index is not None:
index = _ensure_index(index)
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, MultiIndex):
raise NotImplementedError("initializing a Series from a "
"MultiIndex is not supported")
elif isinstance(data, Index):
# need to copy to avoid aliasing issues
if name is None:
name = data.name
data = data._to_embed(keep_tz=True, dtype=dtype)
copy = False
elif isinstance(data, np.ndarray):
pass
elif isinstance(data, Series):
if name is None:
name = data.name
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
data = data._data
elif isinstance(data, dict):
data, index = self._init_dict(data, index, dtype)
dtype = None
copy = False
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
elif isinstance(data, Categorical):
# GH12574: Allow dtype=category only, otherwise error
if ((dtype is not None) and
not is_categorical_dtype(dtype)):
raise ValueError("cannot specify a dtype with a "
"Categorical unless "
"dtype='category'")
elif (isinstance(data, types.GeneratorType) or
(compat.PY3 and isinstance(data, map))):
data = list(data)
elif isinstance(data, (set, frozenset)):
raise TypeError("{0!r} type is unordered"
"".format(data.__class__.__name__))
else:
# handle sparse passed here (and force conversion)
if isinstance(data, ABCSparseArray):
data = data.to_dense()
if index is None:
if not is_list_like(data):
data = [data]
index = _default_index(len(data))
# create/copy the manager
if isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype=dtype, errors='ignore',
copy=copy)
elif copy:
data = data.copy()
else:
data = _sanitize_array(data, index, dtype, copy,
raise_cast_failure=True)
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data, fastpath=True)
self.name = name
self._set_axis(0, index, fastpath=True)
def _init_dict(self, data, index=None, dtype=None):
"""
Derive the "_data" and "index" attributes of a new Series from a
dictionary input.
Parameters
----------
data : dict or dict-like
Data used to populate the new Series
index : Index or index-like, default None
index for the new Series: if None, use dict keys
dtype : dtype, default None
dtype for the new Series: if None, infer from data
Returns
-------
_data : BlockManager for the new Series
index : index for the new Series
"""
# Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
# raises KeyError), so we iterate the entire dict, and align
if data:
keys, values = zip(*compat.iteritems(data))
else:
keys, values = [], []
# Input is now list-like, so rely on "standard" construction:
s = Series(values, index=keys, dtype=dtype)
# Now we just make sure the order is respected, if any
if index is not None:
s = s.reindex(index, copy=False)
elif not isinstance(data, OrderedDict):
try:
s = s.sort_index()
except TypeError:
pass
return s._data, s.index
@classmethod
def from_array(cls, arr, index=None, name=None, dtype=None, copy=False,
fastpath=False):
"""
DEPRECATED: use the pd.Series(..) constructor instead.
"""
warnings.warn("'from_array' is deprecated and will be removed in a "
"future version. Please use the pd.Series(..) "
"constructor instead.", FutureWarning, stacklevel=2)
return cls._from_array(arr, index=index, name=name, dtype=dtype,
copy=copy, fastpath=fastpath)
@classmethod
def _from_array(cls, arr, index=None, name=None, dtype=None, copy=False,
fastpath=False):
"""
Internal method used in DataFrame.__setitem__/__getitem__.
Difference with Series(..) is that this method checks if a sparse
array is passed.
"""
# return a sparse series here
if isinstance(arr, ABCSparseArray):
from pandas.core.sparse.series import SparseSeries
cls = SparseSeries
return cls(arr, index=index, name=name, dtype=dtype, copy=copy,
fastpath=fastpath)
@property
def _constructor(self):
return Series
@property
def _constructor_expanddim(self):
from pandas.core.frame import DataFrame
return DataFrame
# types
@property
def _can_hold_na(self):
return self._data._can_hold_na
_index = None
def _set_axis(self, axis, labels, fastpath=False):
""" override generic, we want to set the _typ here """
if not fastpath:
labels = _ensure_index(labels)
is_all_dates = labels.is_all_dates
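# if every label looks like a datetime, try to upgrade the axis to a
# DatetimeIndex so the Series gets time-series behaviour; fall back silently
# when the labels cannot be converted (e.g. out-of-bounds datetimes).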
if is_all_dates:
if not isinstance(labels,
(DatetimeIndex, PeriodIndex, TimedeltaIndex)):
try:
labels = DatetimeIndex(labels)
# need to set here because we changed the index
if fastpath:
self._data.set_axis(axis, labels)
except (libts.OutOfBoundsDatetime, ValueError):
# labels may exceeds datetime bounds,
# or not be a DatetimeIndex
pass
self._set_subtyp(is_all_dates)
object.__setattr__(self, '_index', labels)
if not fastpath:
self._data.set_axis(axis, labels)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'time_series')
else:
object.__setattr__(self, '_subtyp', 'series')
def _update_inplace(self, result, **kwargs):
# we want to call the generic version and not the IndexOpsMixin
return generic.NDFrame._update_inplace(self, result, **kwargs)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if value is not None and not is_hashable(value):
raise TypeError('Series.name must be a hashable type')
object.__setattr__(self, '_name', value)
# ndarray compatibility
@property
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@property
def dtypes(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@property
def ftype(self):
""" return if the data is sparse|dense """
return self._data.ftype
@property
def ftypes(self):
""" return if the data is sparse|dense """
return self._data.ftype
@property
def values(self):
"""
Return Series as ndarray or ndarray-like
depending on the dtype
Returns
-------
arr : numpy.ndarray or ndarray-like
Examples
--------
>>> pd.Series([1, 2, 3]).values
array([1, 2, 3])
>>> pd.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
>>> pd.Series(list('aabc')).astype('category').values
[a, a, b, c]
Categories (3, object): [a, b, c]
Timezone aware datetime data is converted to UTC:
>>> pd.Series(pd.date_range('20130101', periods=3,
... tz='US/Eastern')).values
array(['2013-01-01T05:00:00.000000000',
'2013-01-02T05:00:00.000000000',
'2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
"""
return self._data.external_values()
@property
def _values(self):
""" return the internal repr of this data """
return self._data.internal_values()
def _formatting_values(self):
"""Return the values that can be formatted (used by SeriesFormatter
and DataFrameFormatter)
"""
return self._data.formatting_values()
def get_values(self):
""" same as values (but handles sparseness conversions); is a view """
return self._data.get_values()
@property
def asobject(self):
"""
return object Series which contains boxed values
*this is an internal non-public method*
"""
return self._data.asobject
# ops
def ravel(self, order='C'):
"""
Return the flattened underlying data as an ndarray
See also
--------
numpy.ndarray.ravel
"""
return self._values.ravel(order=order)
def compress(self, condition, *args, **kwargs):
"""
Return selected slices of an array along given axis as a Series
See also
--------
numpy.ndarray.compress
"""
nv.validate_compress(args, kwargs)
return self[condition]
def nonzero(self):
"""
Return the indices of the elements that are non-zero
This method is equivalent to calling `numpy.nonzero` on the
series data. For compatability with NumPy, the return value is
the same (a tuple with an array of indices for each dimension),
but it will always be a one-item tuple because series only have
one dimension.
Examples
--------
>>> s = pd.Series([0, 3, 0, 4])
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
1 3
3 4
dtype: int64
See Also
--------
numpy.nonzero
"""
return self._values.nonzero()
def put(self, *args, **kwargs):
"""
Applies the `put` method to its `values` attribute
if it has one.
See also
--------
numpy.ndarray.put
"""
self._values.put(*args, **kwargs)
def __len__(self):
"""
return the length of the Series
"""
return len(self._data)
def view(self, dtype=None):
return self._constructor(self._values.view(dtype),
index=self.index).__finalize__(self)
def __array__(self, result=None):
"""
the array interface, return my values
"""
return self.get_values()
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
return self._constructor(result, index=self.index,
copy=False).__finalize__(self)
def __array_prepare__(self, result, context=None):
"""
Gets called prior to a ufunc
"""
# nice error message for non-ufunc types
if context is not None and not isinstance(self._values, np.ndarray):
obj = context[1][0]
raise TypeError("{obj} with dtype {dtype} cannot perform "
"the numpy op {op}".format(
obj=type(obj).__name__,
dtype=getattr(obj, 'dtype', None),
op=context[0].__name__))
return result
# complex
@property
def real(self):
return self.values.real
@real.setter
def real(self, v):
self.values.real = v
@property
def imag(self):
return self.values.imag
@imag.setter
def imag(self, v):
self.values.imag = v
# coercion
__float__ = _coerce_method(float)
__long__ = _coerce_method(int)
__int__ = _coerce_method(int)
def _unpickle_series_compat(self, state):
if isinstance(state, dict):
self._data = state['_data']
self.name = state['name']
self.index = self._data.index
elif isinstance(state, tuple):
# < 0.12 series pickle
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
# backwards compat
index, name = own_state[0], None
if len(own_state) > 1:
name = own_state[1]
# recreate
self._data = SingleBlockManager(data, index, fastpath=True)
self._index = index
self.name = name
else:
raise Exception("cannot unpickle legacy formats -> [%s]" % state)
# indexers
@property
def axes(self):
"""Return a list of the row axis labels"""
return [self.index]
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the Series by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
try:
# dispatch to the values if we need
values = self._values
if isinstance(values, np.ndarray):
return libindex.get_value_at(values, i)
else:
return values[i]
except IndexError:
raise
except Exception:
if isinstance(i, slice):
indexer = self.index._convert_slice_indexer(i, kind='iloc')
return self._get_values(indexer)
else:
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis, convert=True)
else:
return libindex.get_value_at(self, i)
@property
def _is_mixed_type(self):
return False
def _slice(self, slobj, axis=0, kind=None):
slobj = self.index._convert_slice_indexer(slobj,
kind=kind or 'getitem')
return self._get_values(slobj)
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
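# fast path first: ask the index engine for the value directly; the except
# blocks below fall back to slice/boolean/label-based handling when the key
# cannot be resolved this way.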
try:
result = self.index.get_value(self, key)
if not is_scalar(result):
if is_list_like(result) and not isinstance(result, Series):
# we need to box if we have a non-unique index here
# otherwise have inline ndarray/lists
if not self.index.is_unique:
result = self._constructor(
result, index=[key] * len(result),
dtype=self.dtype).__finalize__(self)
return result
except InvalidIndexError:
pass
except (KeyError, ValueError):
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
pass
elif key is Ellipsis:
return self
elif is_bool_indexer(key):
pass
else:
# we can try to coerce the indexer (or this will raise)
new_key = self.index._convert_scalar_indexer(key,
kind='getitem')
if type(new_key) != type(key):
return self.__getitem__(new_key)
raise
except Exception:
raise
if is_iterator(key):
key = list(key)
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
return self._get_with(key)
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind='getitem')
return self._get_values(indexer)
elif isinstance(key, ABCDataFrame):
raise TypeError('Indexing a Series with DataFrame is not '
'supported, use the appropriate DataFrame column')
else:
if isinstance(key, tuple):
try:
return self._get_values_tuple(key)
except Exception:
if len(key) == 1:
key = key[0]
if isinstance(key, slice):
return self._get_values(key)
raise
# pragma: no cover
if not isinstance(key, (list, np.ndarray, Series, Index)):
key = list(key)
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == 'integer':
if self.index.is_integer() or self.index.is_floating():
return self.loc[key]
else:
return self._get_values(key)
elif key_type == 'boolean':
return self._get_values(key)
else:
try:
# handle the dup indexing case (GH 4246)
if isinstance(key, (list, tuple)):
return self.loc[key]
return self.reindex(key)
except Exception:
# [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
# hack
if isinstance(key[0], slice):
return self._get_values(key)
raise
def _get_values_tuple(self, key):
# mpl hackaround
if _any_none(*key):
return self._get_values(key)
if not isinstance(self.index, MultiIndex):
raise ValueError('Can only tuple-index with a MultiIndex')
# If key is contained, would have returned by now
indexer, new_index = self.index.get_loc_level(key)
return self._constructor(self._values[indexer],
index=new_index).__finalize__(self)
def _get_values(self, indexer):
try:
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self._values[indexer]
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
def setitem(key, value):
try:
self._set_with_engine(key, value)
return
except (SettingWithCopyError):
raise
except (KeyError, ValueError):
values = self._values
if (is_integer(key) and
not self.index.inferred_type == 'integer'):
values[key] = value
return
elif key is Ellipsis:
self[:] = value
return
elif com.is_bool_indexer(key):
pass
elif is_timedelta64_dtype(self.dtype):
# reassign a null value to iNaT
if isna(value):
value = iNaT
try:
self.index._engine.set_value(self._values, key,
value)
return
except TypeError:
pass
self.loc[key] = value
return
except TypeError as e:
if (isinstance(key, tuple) and
not isinstance(self.index, MultiIndex)):
raise ValueError("Can only tuple-index with a MultiIndex")
# python 3 type errors should be raised
if _is_unorderable_exception(e):
raise IndexError(key)
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
try:
self._where(~key, value, inplace=True)
return
except InvalidIndexError:
pass
self._set_with(key, value)
# do the setitem
cacher_needs_updating = self._check_is_chained_assignment_possible()
setitem(key, value)
if cacher_needs_updating:
self._maybe_update_cacher()
def _set_with_engine(self, key, value):
values = self._values
try:
self.index._engine.set_value(values, key, value)
return
except KeyError:
values[self.index.get_loc(key)] = value
return
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind='getitem')
return self._set_values(indexer, value)
else:
if isinstance(key, tuple):
try:
self._set_values(key, value)
except Exception:
pass
if not isinstance(key, (list, Series, np.ndarray)):
try:
key = list(key)
except Exception:
key = [key]
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == 'integer':
if self.index.inferred_type == 'integer':
self._set_labels(key, value)
else:
return self._set_values(key, value)
elif key_type == 'boolean':
self._set_values(key.astype(np.bool_), value)
else:
self._set_labels(key, value)
def _set_labels(self, key, value):
if isinstance(key, Index):
key = key.values
else:
key = _asarray_tuplesafe(key)
indexer = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise ValueError('%s not contained in the index' % str(key[mask]))
self._set_values(indexer, value)
def _set_values(self, key, value):
if isinstance(key, Series):
key = key._values
self._data = self._data.setitem(indexer=key, value=value)
self._maybe_update_cacher()
@deprecate_kwarg(old_arg_name='reps', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Series. Refer to `numpy.ndarray.repeat`
for more information about the `repeats` argument.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
new_index = self.index.repeat(repeats)
new_values = self._values.repeat(repeats)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def reshape(self, *args, **kwargs):
"""
.. deprecated:: 0.19.0
Calling this method will raise an error. Please call
``.values.reshape(...)`` instead.
return an ndarray with the values shape
if the specified shape matches exactly the current shape, then
return self (for compat)
See also
--------
numpy.ndarray.reshape
"""
warnings.warn("reshape is deprecated and will raise "
"in a subsequent release. Please use "
".values.reshape(...) instead", FutureWarning,
stacklevel=2)
if len(args) == 1 and hasattr(args[0], '__iter__'):
shape = args[0]
else:
shape = args
if tuple(shape) == self.shape:
# XXX ignoring the "order" keyword.
nv.validate_reshape(tuple(), kwargs)
return self
return self._values.reshape(shape, **kwargs)
def get_value(self, label, takeable=False):
"""
Quickly retrieve single value at passed index label
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(label, takeable=takeable)
def _get_value(self, label, takeable=False):
if takeable is True:
return _maybe_box_datetimelike(self._values[label])
return self.index.get_value(self._values, label)
_get_value.__doc__ = get_value.__doc__
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Returns
-------
series : Series
If label is contained, will be reference to calling Series,
otherwise a new object
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(label, value, takeable=takeable)
def _set_value(self, label, value, takeable=False):
try:
if takeable:
self._values[label] = value
else:
self.index._engine.set_value(self._values, label, value)
except KeyError:
# set using a non-recursive method
self.loc[label] = value
return self
_set_value.__doc__ = set_value.__doc__
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Analogous to the :meth:`pandas.DataFrame.reset_index` function, see
docstring there.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns
name : object, default None
The name of the column corresponding to the Series values
inplace : boolean, default False
Modify the Series in place (do not create a new object)
Returns
-------
reset : DataFrame, or Series if drop == True
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], index=pd.Index(['a', 'b', 'c', 'd'],
... name = 'idx'))
>>> s.reset_index()
index 0
0 0 1
1 1 2
2 2 3
3 3 4
>>> arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo',
... 'foo', 'qux', 'qux']),
... np.array(['one', 'two', 'one', 'two', 'one', 'two',
... 'one', 'two'])]
>>> s2 = pd.Series(
... np.random.randn(8),
... index=pd.MultiIndex.from_arrays(arrays,
... names=['a', 'b']))
>>> s2.reset_index(level='a')
a 0
b
one bar -0.286320
two bar -0.587934
one baz 0.710491
two baz -1.429006
one foo 0.790700
two foo 0.824863
one qux -0.718963
two qux -0.055028
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if drop:
new_index = _default_index(len(self))
if level is not None and isinstance(self.index, MultiIndex):
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < len(self.index.levels):
new_index = self.index.droplevel(level)
if inplace:
self.index = new_index
# set name if it was passed, otherwise, keep the previous name
self.name = name or self.name
else:
return self._constructor(self._values.copy(),
index=new_index).__finalize__(self)
elif inplace:
raise TypeError('Cannot reset_index inplace on a Series '
'to create a DataFrame')
else:
df = self.to_frame(name)
return df.reset_index(level=level, drop=drop)
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
width, height = get_terminal_size()
max_rows = (height if get_option("display.max_rows") == 0 else
get_option("display.max_rows"))
show_dimensions = get_option("display.show_dimensions")
self.to_string(buf=buf, name=self.name, dtype=self.dtype,
max_rows=max_rows, length=show_dimensions)
result = buf.getvalue()
return result
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
index=True, length=False, dtype=False, name=False,
max_rows=None):
"""
Render a string representation of the Series
Parameters
----------
buf : StringIO-like, optional
buffer to write to
na_rep : string, optional
string representation of NAN to use, default 'NaN'
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats
default None
header: boolean, default True
Add the Series header (index name)
index : bool, optional
Add index (row) labels, default True
length : boolean, default False
Add the Series length
dtype : boolean, default False
Add the Series dtype
name : boolean, default False
Add the Series name if not None
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
Returns
-------
formatted : string (if not buffer passed)
"""
formatter = fmt.SeriesFormatter(self, name=name, length=length,
header=header, index=index,
dtype=dtype, na_rep=na_rep,
float_format=float_format,
max_rows=max_rows)
result = formatter.to_string()
# catch contract violations
if not isinstance(result, compat.text_type):
raise AssertionError("result must be of type unicode, type"
" of result is {0!r}"
"".format(result.__class__.__name__))
if buf is None:
return result
else:
try:
buf.write(result)
except AttributeError:
with open(buf, 'w') as f:
f.write(result)
def iteritems(self):
"""
Lazily iterate over (index, value) tuples
"""
return zip(iter(self.index), iter(self))
items = iteritems
# ----------------------------------------------------------------------
# Misc public methods
def keys(self):
"""Alias for index"""
return self.index
def to_dict(self, into=dict):
"""
Convert Series to {label -> value} dict or dict-like object.
Parameters
----------
into : class, default dict
The collections.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
value_dict : collections.Mapping
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_dict()
{0: 1, 1: 2, 2: 3, 3: 4}
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd)
defaultdict(<type 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
"""
# GH16122
into_c = standardize_mapping(into)
return into_c(compat.iteritems(self))
def to_frame(self, name=None):
"""
Convert Series to DataFrame
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
data_frame : DataFrame
"""
if name is None:
df = self._constructor_expanddim(self)
else:
df = self._constructor_expanddim({name: self})
return df
def to_sparse(self, kind='block', fill_value=None):
"""
Convert Series to SparseSeries
Parameters
----------
kind : {'block', 'integer'}
fill_value : float, defaults to NaN (missing)
Returns
-------
sp : SparseSeries
"""
from pandas.core.sparse.series import SparseSeries
return SparseSeries(self, kind=kind,
fill_value=fill_value).__finalize__(self)
def _set_name(self, name, inplace=False):
"""
Set the Series name.
Parameters
----------
name : str
inplace : bool
whether to modify `self` directly or return a copy
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
ser = self if inplace else self.copy()
ser.name = name
return ser
# ----------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
def count(self, level=None):
"""
Return number of non-NA/null observations in the Series
Parameters
----------
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a smaller Series
Returns
-------
nobs : int or Series (if level specified)
"""
from pandas.core.index import _get_na_value
if level is None:
return notna(_values_from_object(self)).sum()
if isinstance(level, compat.string_types):
level = self.index._get_level_number(level)
lev = self.index.levels[level]
lab = np.array(self.index.labels[level], subok=False, copy=True)
mask = lab == -1
if mask.any():
lab[mask] = cnt = len(lev)
lev = lev.insert(cnt, _get_na_value(lev.dtype.type))
obs = lab[notna(self.values)]
out = np.bincount(obs, minlength=len(lev) or None)
return self._constructor(out, index=lev,
dtype='int64').__finalize__(self)
def mode(self):
"""Return the mode(s) of the dataset.
Always returns Series even if only one value is returned.
Returns
-------
modes : Series (sorted)
"""
# TODO: Add option for bins like value_counts()
return algorithms.mode(self)
@Appender(base._shared_docs['unique'] % _shared_doc_kwargs)
def unique(self):
result = super(Series, self).unique()
if is_datetime64tz_dtype(self.dtype):
# we are special casing datetime64tz_dtype
# to return an object array of tz-aware Timestamps
# TODO: it must return DatetimeArray with tz in pandas 2.0
result = result.asobject.values
return result
@Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs)
def drop_duplicates(self, keep='first', inplace=False):
return super(Series, self).drop_duplicates(keep=keep, inplace=inplace)
@Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs)
def duplicated(self, keep='first'):
return super(Series, self).duplicated(keep=keep)
def idxmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Index *label* of the first occurrence of minimum of values.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
Raises
------
ValueError
* If the Series is empty
Returns
-------
idxmin : Index of minimum of values
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
See Also
--------
DataFrame.idxmin
numpy.ndarray.argmin
"""
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
i = nanops.nanargmin(_values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
def idxmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Index *label* of the first occurrence of maximum of values.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
Raises
------
ValueError
* If the Series is empty
Returns
-------
idxmax : Index of maximum of values
Notes
-----
This method is the Series version of ``ndarray.argmax``. This method
returns the label of the maximum, while ``ndarray.argmax`` returns
the position. To get the position, use ``series.values.argmax()``.
See Also
--------
DataFrame.idxmax
numpy.ndarray.argmax
"""
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
i = nanops.nanargmax(_values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
# ndarray compat
argmin = deprecate('argmin', idxmin,
msg="'argmin' is deprecated, use 'idxmin' instead. "
"The behavior of 'argmin' will be corrected to "
"return the positional minimum in the future. "
"Use 'series.values.argmin' to get the position of "
"the minimum now.")
argmax = deprecate('argmax', idxmax,
msg="'argmax' is deprecated, use 'idxmax' instead. "
"The behavior of 'argmax' will be corrected to "
"return the positional maximum in the future. "
"Use 'series.values.argmax' to get the position of "
"the maximum now.")
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Series object
See Also
--------
numpy.around
DataFrame.round
"""
nv.validate_round(args, kwargs)
result = _values_from_object(self).round(decimals)
result = self._constructor(result, index=self.index).__finalize__(self)
return result
def quantile(self, q=0.5, interpolation='linear'):
"""
Return value at the given quantile, a la numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantile : float or Series
if ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles.
Examples
--------
>>> s = Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
self._check_percentile(q)
result = self._data.quantile(qs=q, interpolation=interpolation)
if is_list_like(q):
return self._constructor(result,
index=Float64Index(q),
name=self.name)
else:
# scalar
return result
def corr(self, other, method='pearson', min_periods=None):
"""
Compute correlation with `other` Series, excluding missing values
Parameters
----------
other : Series
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations needed to have a valid result
Returns
-------
correlation : float
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
return nanops.nancorr(this.values, other.values, method=method,
min_periods=min_periods)
def cov(self, other, min_periods=None):
"""
Compute covariance with Series, excluding missing values
Parameters
----------
other : Series
min_periods : int, optional
Minimum number of observations needed to have a valid result
Returns
-------
covariance : float
Normalized by N-1 (unbiased estimator).
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
return nanops.nancov(this.values, other.values,
min_periods=min_periods)
def diff(self, periods=1):
"""
1st discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
Returns
-------
diffed : Series
"""
result = algorithms.diff(_values_from_object(self), periods)
return self._constructor(result, index=self.index).__finalize__(self)
def autocorr(self, lag=1):
"""
Lag-N autocorrelation
Parameters
----------
lag : int, default 1
Number of lags to apply before performing autocorrelation.
Returns
-------
autocorr : float
"""
return self.corr(self.shift(lag))
def dot(self, other):
"""
Matrix multiplication with DataFrame or inner-product with Series
objects
Parameters
----------
other : Series or DataFrame
Returns
-------
dot_product : scalar or Series
"""
from pandas.core.frame import DataFrame
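# align both operands on the union of their indexes; if the union is strictly
# larger than either index, some labels are missing on one side and the
# product is not well defined.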
if isinstance(other, (Series, DataFrame)):
common = self.index.union(other.index)
if (len(common) > len(self.index) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(index=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[0] != rvals.shape[0]:
raise Exception('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals),
index=other.columns).__finalize__(self)
elif isinstance(other, Series):
return np.dot(lvals, rvals)
elif isinstance(rvals, np.ndarray):
return np.dot(lvals, rvals)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
@Substitution(klass='Series')
@Appender(base._shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='v', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if sorter is not None:
sorter = _ensure_platform_int(sorter)
return self._values.searchsorted(Series(value)._values,
side=side, sorter=sorter)
# -------------------------------------------------------------------
# Combination
def append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
ignore_index : boolean, default False
If True, do not use the index labels.
.. versionadded:: 0.19.0
verify_integrity : boolean, default False
If True, raise Exception on creating index with duplicates
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Returns
-------
appended : Series
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3,4,5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: [0, 1, 2]
"""
from pandas.core.reshape.concat import concat
if isinstance(to_append, (list, tuple)):
to_concat = [self] + to_append
else:
to_concat = [self, to_append]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def _binop(self, other, func, level=None, fill_value=None):
"""
Perform generic binary operation with optional fill value
Parameters
----------
other : Series
func : binary operator
fill_value : float or object
Value to substitute for NA/null values. If both Series are NA in a
location, the result will be NA regardless of the passed fill value
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
combined : Series
"""
if not isinstance(other, Series):
raise AssertionError('Other operand must be Series')
new_index = self.index
this = self
if not self.index.equals(other.index):
this, other = self.align(other, level=level, join='outer',
copy=False)
new_index = this.index
this_vals = this.values
other_vals = other.values
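# when a fill_value is given, substitute it only at positions that are missing
# in exactly one of the two operands; positions missing in both stay NA.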
if fill_value is not None:
this_mask = isna(this_vals)
other_mask = isna(other_vals)
this_vals = this_vals.copy()
other_vals = other_vals.copy()
# one but not both
mask = this_mask ^ other_mask
this_vals[this_mask & mask] = fill_value
other_vals[other_mask & mask] = fill_value
with np.errstate(all='ignore'):
result = func(this_vals, other_vals)
name = _maybe_match_name(self, other)
result = self._constructor(result, index=new_index, name=name)
result = result.__finalize__(self)
if name is None:
# When name is None, __finalize__ overwrites current name
result.name = None
return result
def combine(self, other, func, fill_value=np.nan):
"""
Perform elementwise binary operation on two Series using given function
with optional fill value when an index is missing from one Series or
the other
Parameters
----------
other : Series or scalar value
func : function
Function that takes two scalars as inputs and return a scalar
fill_value : scalar value
Returns
-------
result : Series
Examples
--------
>>> s1 = Series([1, 2])
>>> s2 = Series([0, 3])
>>> s1.combine(s2, lambda x1, x2: x1 if x1 < x2 else x2)
0 0
1 2
dtype: int64
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series's values first
"""
if isinstance(other, Series):
new_index = self.index.union(other.index)
new_name = _maybe_match_name(self, other)
new_values = np.empty(len(new_index), dtype=self.dtype)
for i, idx in enumerate(new_index):
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all='ignore'):
new_values[i] = func(lv, rv)
else:
new_index = self.index
with np.errstate(all='ignore'):
new_values = func(self._values, other)
new_name = self.name
return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
combined : Series
Examples
--------
>>> s1 = pd.Series([1, np.nan])
>>> s2 = pd.Series([3, 4])
>>> s1.combine_first(s2)
0 1.0
1 4.0
dtype: float64
See Also
--------
Series.combine : Perform elementwise operation on two Series
using a given function
"""
new_index = self.index.union(other.index)
this = self.reindex(new_index, copy=False)
other = other.reindex(new_index, copy=False)
# TODO: do we need name?
name = _maybe_match_name(self, other) # noqa
rs_vals = com._where_compat(isna(this), other._values, this._values)
return self._constructor(rs_vals, index=new_index).__finalize__(self)
def update(self, other):
"""
Modify Series in place using non-NA values from passed
Series. Aligns on index
Parameters
----------
other : Series
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6]))
>>> s
0 4
1 5
2 6
dtype: int64
>>> s = pd.Series(['a', 'b', 'c'])
>>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
>>> s
0 d
1 b
2 e
dtype: object
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6, 7, 8]))
>>> s
0 4
1 5
2 6
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, np.nan, 6]))
>>> s
0 4
1 2
2 6
dtype: int64
"""
other = other.reindex_like(self)
mask = notna(other)
self._data = self._data.putmask(mask=mask, new=other, inplace=True)
self._maybe_update_cacher()
# ----------------------------------------------------------------------
# Reindexing, sorting
@Appender(generic._shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
# GH 5856/5853
if inplace and self._is_cached:
raise ValueError("This Series is a view of some other array, to "
"sort in-place you must create a copy")
def _try_kind_sort(arr):
# easier to ask forgiveness than permission
try:
# if kind==mergesort, it can fail for object dtype
return arr.argsort(kind=kind)
except TypeError:
# stable sort not available for object dtype
# uses the argsort default quicksort
return arr.argsort(kind='quicksort')
arr = self._values
sortedIdx = np.empty(len(self), dtype=np.int32)
bad = isna(arr)
good = ~bad
idx = _default_index(len(self))
argsorted = _try_kind_sort(arr[good])
if is_list_like(ascending):
if len(ascending) != 1:
raise ValueError('Length of ascending (%d) must be 1 '
'for Series' % (len(ascending)))
ascending = ascending[0]
if not is_bool(ascending):
raise ValueError('ascending must be boolean')
if not ascending:
argsorted = argsorted[::-1]
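# stitch the final order together: sorted positions of the valid values plus
# the NaN positions, placed at whichever end na_position requests.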
if na_position == 'last':
n = good.sum()
sortedIdx[:n] = idx[good][argsorted]
sortedIdx[n:] = idx[bad]
elif na_position == 'first':
n = bad.sum()
sortedIdx[n:] = idx[good][argsorted]
sortedIdx[:n] = idx[bad]
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self)
@Appender(generic._shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
# TODO: this can be combined with DataFrame.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
index = self.index
if level:
new_index, indexer = index.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(index, MultiIndex):
from pandas.core.sorting import lexsort_indexer
labels = index._sort_levels_monotonic()
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if ((ascending and index.is_monotonic_increasing) or
(not ascending and index.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(index, kind=kind, ascending=ascending,
na_position=na_position)
indexer = _ensure_platform_int(indexer)
new_index = index.take(indexer)
new_index = new_index._sort_levels_monotonic()
new_values = self._values.take(indexer)
result = self._constructor(new_values, index=new_index)
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self)
def argsort(self, axis=0, kind='quicksort', order=None):
"""
Overrides ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values
Parameters
----------
axis : int (can only be zero)
kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See np.sort for more
information. 'mergesort' is the only stable algorithm
order : ignored
Returns
-------
        argsorted : Series, with -1 indicating where nan values are present
See also
--------
numpy.ndarray.argsort
"""
values = self._values
mask = isna(values)
if mask.any():
result = Series(-1, index=self.index, name=self.name,
dtype='int64')
notmask = ~mask
result[notmask] = np.argsort(values[notmask], kind=kind)
return self._constructor(result,
index=self.index).__finalize__(self)
else:
return self._constructor(
np.argsort(values, kind=kind), index=self.index,
dtype='int64').__finalize__(self)
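    # Illustrative sketch (hypothetical values): unlike ``numpy.argsort``,
    # positions holding NA come back as -1 instead of being sorted, matching
    # the masking logic above.
    #
    #   >>> pd.Series([3.0, np.nan, 1.0]).argsort()
    #   0    1
    #   1   -1
    #   2    0
    #   dtype: int64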
def nlargest(self, n=5, keep='first'):
"""
Return the largest `n` elements.
Parameters
----------
n : int
Return this many descending sorted values
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
top_n : Series
The n largest values in the Series, in sorted order
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
See Also
--------
Series.nsmallest
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> s = pd.Series(np.random.randn(10**6))
>>> s.nlargest(10) # only sorts up to the N requested
219921 4.644710
82124 4.608745
421689 4.564644
425277 4.447014
718691 4.414137
43154 4.403520
283187 4.313922
595519 4.273635
503969 4.250236
121637 4.240952
dtype: float64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
def nsmallest(self, n=5, keep='first'):
"""
Return the smallest `n` elements.
Parameters
----------
n : int
Return this many ascending sorted values
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
bottom_n : Series
The n smallest values in the Series, in sorted order
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
See Also
--------
Series.nlargest
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> s = pd.Series(np.random.randn(10**6))
>>> s.nsmallest(10) # only sorts up to the N requested
288532 -4.954580
732345 -4.835960
64803 -4.812550
446457 -4.609998
501225 -4.483945
669476 -4.472935
973615 -4.401699
621279 -4.355126
773916 -4.347355
359919 -4.331927
dtype: float64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
DEPRECATED: use :meth:`Series.sort_index`
Sort Series with MultiIndex by chosen level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int or level name, default None
ascending : bool, default True
Returns
-------
sorted : Series
See Also
--------
Series.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level=...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, ascending=ascending,
sort_remaining=sort_remaining)
def swaplevel(self, i=-2, j=-1, copy=True):
"""
Swap levels i and j in a MultiIndex
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : Series
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
new_index = self.index.swaplevel(i, j)
return self._constructor(self._values, index=new_index,
copy=copy).__finalize__(self)
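    # Illustrative sketch (hypothetical values): swapping the two innermost
    # levels only reorders the index, the data are left untouched.
    #
    #   >>> s = pd.Series([1, 2],
    #   ...               index=pd.MultiIndex.from_tuples([('a', 'x'),
    #   ...                                                ('b', 'y')]))
    #   >>> s.swaplevel()
    #   x  a    1
    #   y  b    2
    #   dtype: int64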
def reorder_levels(self, order):
"""
Rearrange index levels using input order. May not drop or duplicate
levels
Parameters
----------
order : list of int representing new level order.
(reference level by number or key)
axis : where to reorder levels
Returns
-------
type of caller (new object)
"""
if not isinstance(self.index, MultiIndex): # pragma: no cover
raise Exception('Can only reorder levels on a hierarchical axis.')
result = self.copy()
result.index = result.index.reorder_levels(order)
return result
def unstack(self, level=-1, fill_value=None):
"""
Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
Examples
--------
>>> s = pd.Series([1, 2, 3, 4],
... index=pd.MultiIndex.from_product([['one', 'two'], ['a', 'b']]))
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
# ----------------------------------------------------------------------
# function application
def map(self, arg, na_action=None):
"""
Map values of Series using input correspondence (which can be
a dict, Series, or function)
Parameters
----------
arg : function, dict, or Series
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
y : Series
same index as caller
Examples
--------
Map inputs to outputs (both of type `Series`)
>>> x = pd.Series([1,2,3], index=['one', 'two', 'three'])
>>> x
one 1
two 2
three 3
dtype: int64
>>> y = pd.Series(['foo', 'bar', 'baz'], index=[1,2,3])
>>> y
1 foo
2 bar
3 baz
>>> x.map(y)
one foo
two bar
three baz
If `arg` is a dictionary, return a new Series with values converted
according to the dictionary's mapping:
>>> z = {1: 'A', 2: 'B', 3: 'C'}
>>> x.map(z)
one A
two B
three C
Use na_action to control whether NA values are affected by the mapping
function.
>>> s = pd.Series([1, 2, 3, np.nan])
>>> s2 = s.map('this is a string {}'.format, na_action=None)
0 this is a string 1.0
1 this is a string 2.0
2 this is a string 3.0
3 this is a string nan
dtype: object
>>> s3 = s.map('this is a string {}'.format, na_action='ignore')
0 this is a string 1.0
1 this is a string 2.0
2 this is a string 3.0
3 NaN
dtype: object
See Also
--------
Series.apply: For applying more complex functions on a Series
DataFrame.apply: Apply a function row-/column-wise
DataFrame.applymap: Apply a function elementwise on a whole DataFrame
Notes
-----
When `arg` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``NaN``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``NaN``:
>>> from collections import Counter
>>> counter = Counter()
>>> counter['bar'] += 1
>>> y.map(counter)
1 0
2 1
3 0
dtype: int64
"""
new_values = super(Series, self)._map_values(
arg, na_action=na_action)
return self._constructor(new_values,
index=self.index).__finalize__(self)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
return self
_agg_doc = dedent("""
Examples
--------
>>> s = Series(np.random.randn(10))
>>> s.agg('min')
-1.3018049988556679
>>> s.agg(['min', 'max'])
min -1.301805
max 1.127688
dtype: float64
See also
--------
pandas.Series.apply
pandas.Series.transform
""")
@Appender(_agg_doc)
@Appender(generic._shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
# we can be called from an inner function which
# passes this meta-data
kwargs.pop('_axis', None)
kwargs.pop('_level', None)
# try a regular apply, this evaluates lambdas
            # row-by-row; however if the lambda is expected to be a Series
            # expression, e.g.: lambda x: x-x.quantile(0.25)
            # this will fail, so we can try a vectorized evaluation
            # we cannot FIRST try the vectorized evaluation, because
# then .agg and .apply would have different semantics if the
# operation is actually defined on the Series, e.g. str
try:
result = self.apply(func, *args, **kwargs)
except (ValueError, AttributeError, TypeError):
result = func(self, *args, **kwargs)
return result
agg = aggregate
def apply(self, func, convert_dtype=True, args=(), **kwds):
"""
Invoke function on values of Series. Can be ufunc (a NumPy function
that applies to the entire Series) or a Python function that only works
on single values
Parameters
----------
func : function
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object
args : tuple
Positional arguments to pass to function in addition to the value
Additional keyword arguments will be passed as keywords to the function
Returns
-------
y : Series or DataFrame if func returns a Series
See also
--------
Series.map: For element-wise operations
Series.agg: only perform aggregating type operations
        Series.transform: only perform transforming type operations
Examples
--------
Create a series with typical summer temperatures for each city.
>>> import pandas as pd
>>> import numpy as np
>>> series = pd.Series([20, 21, 12], index=['London',
... 'New York','Helsinki'])
>>> series
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x**2
>>> series.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> series.apply(lambda x: x**2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x-custom_value
>>> series.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x+=kwargs[month]
... return x
>>> series.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> series.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
if len(self) == 0:
return self._constructor(dtype=self.dtype,
index=self.index).__finalize__(self)
# dispatch to agg
if isinstance(func, (list, dict)):
return self.aggregate(func, *args, **kwds)
# if we are a string, try to dispatch
if isinstance(func, compat.string_types):
return self._try_aggregate_string_function(func, *args, **kwds)
# handle ufuncs and lambdas
if kwds or args and not isinstance(func, np.ufunc):
f = lambda x: func(x, *args, **kwds)
else:
f = func
with np.errstate(all='ignore'):
if isinstance(f, np.ufunc):
return f(self)
# row-wise access
if is_extension_type(self.dtype):
mapped = self._values.map(f)
else:
values = self.asobject
mapped = lib.map_infer(values, f, convert=convert_dtype)
if len(mapped) and isinstance(mapped[0], Series):
from pandas.core.frame import DataFrame
return DataFrame(mapped.tolist(), index=self.index)
else:
return self._constructor(mapped,
index=self.index).__finalize__(self)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
"""
perform a reduction operation
if we have an ndarray as a value, then simply perform the operation,
otherwise delegate to the object
"""
delegate = self._values
if isinstance(delegate, np.ndarray):
# Validate that 'axis' is consistent with Series's single axis.
self._get_axis_number(axis)
if numeric_only:
raise NotImplementedError('Series.{0} does not implement '
'numeric_only.'.format(name))
with np.errstate(all='ignore'):
return op(delegate, skipna=skipna, **kwds)
return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna,
numeric_only=numeric_only,
filter_type=filter_type, **kwds)
def _reindex_indexer(self, new_index, indexer, copy):
if indexer is None:
if copy:
return self.copy()
return self
# be subclass-friendly
new_values = algorithms.take_1d(self.get_values(), indexer)
return self._constructor(new_values, index=new_index)
def _needs_reindex_multi(self, axes, method, level):
""" check if we do need a multi reindex; this is for compat with
higher dims
"""
return False
@Appender(generic._shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(Series, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value, method=method,
limit=limit, fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
def rename(self, index=None, **kwargs):
"""Alter Series index labels or name
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
index : scalar, hashable sequence, dict-like or function, optional
dict-like or functions are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
attribute.
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : Series (new object)
See Also
--------
pandas.Series.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
"""
kwargs['inplace'] = validate_bool_kwarg(kwargs.get('inplace', False),
'inplace')
non_mapping = is_scalar(index) or (is_list_like(index) and
not is_dict_like(index))
if non_mapping:
return self._set_name(index, inplace=kwargs.get('inplace'))
return super(Series, self).rename(index=index, **kwargs)
@Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, **kwargs):
return super(Series, self).reindex(index=index, **kwargs)
@Appender(generic._shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Series, self).fillna(value=value, method=method,
axis=axis, inplace=inplace,
limit=limit, downcast=downcast,
**kwargs)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(Series, self).shift(periods=periods, freq=freq, axis=axis)
def reindex_axis(self, labels, axis=0, **kwargs):
""" for compatibility with higher dims """
if axis != 0:
raise ValueError("cannot reindex series on non-zero axis!")
msg = ("'.reindex_axis' is deprecated and will be removed in a future "
"version. Use '.reindex' instead.")
warnings.warn(msg, FutureWarning, stacklevel=2)
return self.reindex(index=labels, **kwargs)
def memory_usage(self, index=True, deep=False):
"""Memory usage of the Series
Parameters
----------
index : bool
Specifies whether to include memory usage of Series index
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
scalar bytes of memory consumed
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
v = super(Series, self).memory_usage(deep=deep)
if index:
v += self.index.memory_usage(deep=deep)
return v
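    # Usage sketch (results are platform dependent, so no outputs are shown):
    #
    #   s = pd.Series(range(3))
    #   s.memory_usage()             # index + values, in bytes
    #   s.memory_usage(index=False)  # values only
    #   s.memory_usage(deep=True)    # also introspect object-dtype contents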
@Appender(generic._shared_docs['_take'])
def _take(self, indices, axis=0, convert=True, is_copy=False):
if convert:
indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
indices = _ensure_platform_int(indices)
new_index = self.index.take(indices)
new_values = self._values.take(indices)
result = (self._constructor(new_values, index=new_index,
fastpath=True).__finalize__(self))
# Maybe set copy if we didn't actually change the index.
if is_copy:
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def isin(self, values):
"""
Return a boolean :class:`~pandas.Series` showing whether each element
in the :class:`~pandas.Series` is exactly contained in the passed
sequence of ``values``.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
``list`` of one element.
.. versionadded:: 0.18.1
Support for values as a set
Returns
-------
isin : Series (bool dtype)
Raises
------
TypeError
* If ``values`` is a string
See Also
--------
pandas.DataFrame.isin
Examples
--------
>>> s = pd.Series(list('abc'))
>>> s.isin(['a', 'c', 'e'])
0 True
1 False
2 True
dtype: bool
Passing a single string as ``s.isin('a')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['a'])
0 True
1 False
2 False
dtype: bool
"""
result = algorithms.isin(_values_from_object(self), values)
return self._constructor(result, index=self.index).__finalize__(self)
def between(self, left, right, inclusive=True):
"""
Return boolean Series equivalent to left <= series <= right. NA values
will be treated as False
Parameters
----------
left : scalar
Left boundary
right : scalar
Right boundary
Returns
-------
is_between : Series
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask
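    # Illustrative sketch (hypothetical values): the ``inclusive`` flag switches
    # between the <=/>= and </> comparisons implemented above.
    #
    #   >>> s = pd.Series([1, 5, 10])
    #   >>> s.between(2, 10)
    #   0    False
    #   1     True
    #   2     True
    #   dtype: bool
    #   >>> s.between(2, 10, inclusive=False)
    #   0    False
    #   1     True
    #   2    False
    #   dtype: bool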
@classmethod
def from_csv(cls, path, sep=',', parse_dates=True, header=None,
index_col=0, encoding=None, infer_datetime_format=False):
"""
Read CSV file (DEPRECATED, please use :func:`pandas.read_csv`
instead).
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a time Series.
This method only differs from :func:`pandas.read_csv` in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `header` is ``None`` instead of ``0`` (the first row is not used as
the column names)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
With :func:`pandas.read_csv`, the option ``squeeze=True`` can be used
to return a Series like ``from_csv``.
Parameters
----------
path : string file path or file handle / StringIO
sep : string, default ','
Field delimiter
parse_dates : boolean, default True
Parse dates. Different default from read_table
header : int, default None
Row to use as header (skip prior rows)
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : Series
"""
# We're calling `DataFrame.from_csv` in the implementation,
# which will propagate a warning regarding `from_csv` deprecation.
from pandas.core.frame import DataFrame
df = DataFrame.from_csv(path, header=header, index_col=index_col,
sep=sep, parse_dates=parse_dates,
encoding=encoding,
infer_datetime_format=infer_datetime_format)
result = df.iloc[:, 0]
if header is None:
result.index.name = result.name = None
return result
def to_csv(self, path=None, index=True, sep=",", na_rep='',
float_format=None, header=False, index_label=None,
mode='w', encoding=None, date_format=None, decimal='.'):
"""
Write Series to a comma-separated values (csv) file
Parameters
----------
path : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
header : boolean, default False
Write out series name
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
mode : Python write mode, default 'w'
sep : character, default ","
Field delimiter for the output file.
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
date_format: string, default None
Format string for datetime objects.
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
"""
from pandas.core.frame import DataFrame
df = DataFrame(self)
# result is only a string if no path provided, otherwise None
result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep,
float_format=float_format, header=header,
index_label=index_label, mode=mode,
encoding=encoding, date_format=date_format,
decimal=decimal)
if path is None:
return result
@Appender(generic._shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True):
df = self.to_frame()
df.to_excel(excel_writer=excel_writer, sheet_name=sheet_name,
na_rep=na_rep, float_format=float_format, columns=columns,
header=header, index=index, index_label=index_label,
startrow=startrow, startcol=startcol, engine=engine,
merge_cells=merge_cells, encoding=encoding,
inf_rep=inf_rep, verbose=verbose)
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(Series, self).isna()
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(Series, self).isnull()
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(Series, self).notna()
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(Series, self).notnull()
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Return Series without null values
        Parameters
        ----------
        inplace : boolean, default False
            Do operation in place.
        Returns
        -------
        valid : Series
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
kwargs.pop('how', None)
if kwargs:
raise TypeError('dropna() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axis = self._get_axis_number(axis or 0)
if self._can_hold_na:
result = remove_na_arraylike(self)
if inplace:
self._update_inplace(result)
else:
return result
else:
if inplace:
# do nothing
pass
else:
return self.copy()
valid = lambda self, inplace=False, **kwargs: self.dropna(inplace=inplace,
**kwargs)
@Appender(generic._shared_docs['valid_index'] % {
'position': 'first', 'klass': 'Series'})
def first_valid_index(self):
if len(self) == 0:
return None
mask = isna(self._values)
i = mask.argmin()
if mask[i]:
return None
else:
return self.index[i]
@Appender(generic._shared_docs['valid_index'] % {
'position': 'last', 'klass': 'Series'})
def last_valid_index(self):
if len(self) == 0:
return None
mask = isna(self._values[::-1])
i = mask.argmin()
if mask[i]:
return None
else:
return self.index[len(self) - i - 1]
# ----------------------------------------------------------------------
# Time series-oriented methods
def to_timestamp(self, freq=None, how='start', copy=True):
"""
Cast to datetimeindex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
Returns
-------
ts : Series with DatetimeIndex
"""
new_values = self._values
if copy:
new_values = new_values.copy()
new_index = self.index.to_timestamp(freq=freq, how=how)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def to_period(self, freq=None, copy=True):
"""
Convert Series from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
Returns
-------
ts : Series with PeriodIndex
"""
new_values = self._values
if copy:
new_values = new_values.copy()
new_index = self.index.to_period(freq=freq)
return self._constructor(new_values,
index=new_index).__finalize__(self)
# -------------------------------------------------------------------------
# Datetimelike delegation methods
dt = accessor.AccessorProperty(CombinedDatetimelikeProperties)
# -------------------------------------------------------------------------
# Categorical methods
cat = accessor.AccessorProperty(CategoricalAccessor)
# String Methods
str = accessor.AccessorProperty(strings.StringMethods)
# ----------------------------------------------------------------------
# Add plotting methods to Series
plot = accessor.AccessorProperty(gfx.SeriesPlotMethods,
gfx.SeriesPlotMethods)
hist = gfx.hist_series
Series._setup_axes(['index'], info_axis=0, stat_axis=0, aliases={'rows': 0})
Series._add_numeric_operations()
Series._add_series_only_operations()
Series._add_series_or_dataframe_operations()
# Add arithmetic!
ops.add_flex_arithmetic_methods(Series, **ops.series_flex_funcs)
ops.add_special_arithmetic_methods(Series, **ops.series_special_funcs)
# -----------------------------------------------------------------------------
# Supplementary functions
def _sanitize_index(data, index, copy=False):
""" sanitize an index type to return an ndarray of the underlying, pass
thru a non-Index
"""
if index is None:
return data
if len(data) != len(index):
raise ValueError('Length of values does not match length of ' 'index')
if isinstance(data, ABCIndexClass) and not copy:
pass
elif isinstance(data, PeriodIndex):
data = data.asobject
elif isinstance(data, DatetimeIndex):
data = data._to_embed(keep_tz=True)
elif isinstance(data, np.ndarray):
# coerce datetimelike types
if data.dtype.kind in ['M', 'm']:
data = _sanitize_array(data, index, copy=copy)
return data
def _sanitize_array(data, index, dtype=None, copy=False,
raise_cast_failure=False):
""" sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if maybe_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = maybe_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr, dtype.categories,
ordered=dtype.ordered)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
if not isna(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=copy)
else:
            # we will try to copy by definition here
subarr = _try_cast(data, True)
elif isinstance(data, Categorical):
subarr = data
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, (list, tuple)) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH 16804
start, stop, step = get_range_parameters(data)
arr = np.arange(start, stop, step, dtype='int64')
subarr = _try_cast(arr, False)
else:
subarr = _try_cast(data, False)
# scalar like, GH
if getattr(subarr, 'ndim', 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(
value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception('Data must be 1-dimensional')
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
    # This is to prevent mixed-type Series getting all cast to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
subarr = np.array(data, dtype=object, copy=copy)
return subarr
| bsd-3-clause |
weinbe58/QuSpin | docs/downloads/e69db8689f657bf92d0e9e158dad5bbf/example3.py | 3 | 5466 | from __future__ import print_function, division
import sys,os
# line 4 and line 5 below are for development purposes and can be removed
qspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,qspin_path)
########################################################################################
# example 3 #
# In this example we show how to use the photon_basis class to study spin chains #
# coupled to a single photon mode. To demonstrate this we simulate a single spin #
# and show how the semi-classical limit emerges in the limit that the number of #
# photons goes to infinity. #
########################################################################################
from quspin.basis import spin_basis_1d,photon_basis # Hilbert space bases
from quspin.operators import hamiltonian # Hamiltonian and observables
from quspin.tools.measurements import obs_vs_time # t_dep measurements
from quspin.tools.Floquet import Floquet,Floquet_t_vec # Floquet Hamiltonian
from quspin.basis.photon import coherent_state # HO coherent state
import numpy as np # generic math functions
#
##### define model parameters #####
Nph_tot=60 # maximum photon occupation
Nph=Nph_tot/2 # mean number of photons in initial coherent state
Omega=3.5 # drive frequency
A=0.8 # spin-photon coupling strength (drive amplitude)
Delta=1.0 # difference between atom energy levels
#
##### set up photon-atom Hamiltonian #####
# define operator site-coupling lists
ph_energy=[[Omega]] # photon energy
at_energy=[[Delta,0]] # atom energy
absorb=[[A/(2.0*np.sqrt(Nph)),0]] # absorption term
emit=[[A/(2.0*np.sqrt(Nph)),0]] # emission term
# define static and dynamics lists
static=[["|n",ph_energy],["x|-",absorb],["x|+",emit],["z|",at_energy]]
dynamic=[]
# compute atom-photon basis
basis=photon_basis(spin_basis_1d,L=1,Nph=Nph_tot)
# compute atom-photon Hamiltonian H
H=hamiltonian(static,dynamic,dtype=np.float64,basis=basis)
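# The static list above is intended to encode (as assumed here, up to QuSpin's
# operator-string conventions) the lab-frame Hamiltonian
#   H = Omega*n + Delta*sigma^z + A/(2*sqrt(Nph)) * sigma^x * (a + a^dagger),
# i.e. a single two-level atom coupled to a single photon mode.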
#
##### set up semi-classical Hamiltonian #####
# define operators
dipole_op=[[A,0]]
# define periodic drive and its parameters
def drive(t,Omega):
return np.cos(Omega*t)
drive_args=[Omega]
# define semi-classical static and dynamic lists
static_sc=[["z",at_energy]]
dynamic_sc=[["x",dipole_op,drive,drive_args]]
# compute semi-classical basis
basis_sc=spin_basis_1d(L=1)
# compute semi-classical Hamiltonian H_{sc}(t)
H_sc=hamiltonian(static_sc,dynamic_sc,dtype=np.float64,basis=basis_sc)
#
##### define initial state #####
# define atom ground state
#psi_at_i=np.array([1.0,0.0]) # spin-down eigenstate of \sigma^z in QuSpin 0.2.3 or older
psi_at_i=np.array([0.0,1.0]) # spin-down eigenstate of \sigma^z in QuSpin 0.2.6 or newer
# define photon coherent state with mean photon number Nph
psi_ph_i=coherent_state(np.sqrt(Nph),Nph_tot+1)
# compute atom-photon initial state as a tensor product
psi_i=np.kron(psi_at_i,psi_ph_i)
#
##### calculate time evolution #####
# define time vector over 30 driving cycles with 100 points per period
t=Floquet_t_vec(Omega,30) # t.i = initial time, t.T = driving period
# evolve atom-photon state with Hamiltonian H
psi_t=H.evolve(psi_i,t.i,t.vals,iterate=True,rtol=1E-9,atol=1E-9)
# evolve atom GS with semi-classical Hamiltonian H_sc
psi_sc_t=H_sc.evolve(psi_at_i,t.i,t.vals,iterate=True,rtol=1E-9,atol=1E-9)
#
##### define observables #####
# define observables parameters
obs_args={"basis":basis,"check_herm":False,"check_symm":False}
obs_args_sc={"basis":basis_sc,"check_herm":False,"check_symm":False}
# in atom-photon Hilbert space
n=hamiltonian([["|n", [[1.0 ]] ]],[],dtype=np.float64,**obs_args)
sz=hamiltonian([["z|",[[1.0,0]] ]],[],dtype=np.float64,**obs_args)
sy=hamiltonian([["y|", [[1.0,0]] ]],[],dtype=np.complex128,**obs_args)
# in the semi-classical Hilbert space
sz_sc=hamiltonian([["z",[[1.0,0]] ]],[],dtype=np.float64,**obs_args_sc)
sy_sc=hamiltonian([["y",[[1.0,0]] ]],[],dtype=np.complex128,**obs_args_sc)
#
##### calculate expectation values #####
# in atom-photon Hilbert space
Obs_t = obs_vs_time(psi_t,t.vals,{"n":n,"sz":sz,"sy":sy})
O_n, O_sz, O_sy = Obs_t["n"], Obs_t["sz"], Obs_t["sy"]
# in the semi-classical Hilbert space
Obs_sc_t = obs_vs_time(psi_sc_t,t.vals,{"sz_sc":sz_sc,"sy_sc":sy_sc})
O_sz_sc, O_sy_sc = Obs_sc_t["sz_sc"], Obs_sc_t["sy_sc"]
##### plot results #####
import matplotlib.pyplot as plt
import pylab
# define legend labels
str_n = "$\\langle n\\rangle,$"
str_z = "$\\langle\\sigma^z\\rangle,$"
str_x = "$\\langle\\sigma^x\\rangle,$"
str_z_sc = "$\\langle\\sigma^z\\rangle_\\mathrm{sc},$"
str_x_sc = "$\\langle\\sigma^x\\rangle_\\mathrm{sc}$"
# plot spin-photon data
fig = plt.figure()
plt.plot(t.vals/t.T,O_n/Nph,"k",linewidth=1,label=str_n)
plt.plot(t.vals/t.T,O_sz,"c",linewidth=1,label=str_z)
plt.plot(t.vals/t.T,O_sy,"tan",linewidth=1,label=str_x)
# plot semi-classical data
plt.plot(t.vals/t.T,O_sz_sc,"b.",marker=".",markersize=1.8,label=str_z_sc)
plt.plot(t.vals/t.T,O_sy_sc,"r.",marker=".",markersize=2.0,label=str_x_sc)
# label axes
plt.xlabel("$t/T$",fontsize=18)
# set y axis limits
plt.ylim([-1.1,1.4])
# display legend horizontally
plt.legend(loc="upper right",ncol=5,columnspacing=0.6,numpoints=4)
# update axis font size
plt.tick_params(labelsize=16)
# turn on grid
plt.grid(True)
# save figure
plt.tight_layout()
plt.savefig('example3.pdf', bbox_inches='tight')
# show plot
#plt.show()
plt.close() | bsd-3-clause |
MatthieuBizien/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
mjirik/pysegbase | imcut/pycut.py | 1 | 62734 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Organ segmentation
Example:
$ pycat -f head.mat -o brain.mat
"""
from __future__ import absolute_import, division, print_function
import logging
logger = logging.getLogger(__name__)
# import unittest
# from optparse import OptionParser
import argparse
import sys
# import os.path as op
from scipy.io import loadmat
import numpy as np
import time
import scipy.stats
import copy
import pygco
# from pygco import cut_from_graph
# from . import models
from .image_manipulation import (
seed_zoom,
zoom_to_shape,
resize_to_shape,
resize_to_shape_with_zoom,
select_objects_by_seeds,
crop,
uncrop,
)
from .models import Model, Model3D, defaultmodelparams, methods, accepted_methods
from .graph import Graph
# from imcut.graph import Graph
class ImageGraphCut:
"""
Interactive Graph Cut.
ImageGraphCut(data, zoom, modelparams)
scale
Example:
igc = ImageGraphCut(data)
igc.interactivity()
igc.make_gc()
igc.show_segmentation()
"""
def __init__(
self,
img,
modelparams=None,
segparams=None,
voxelsize=None,
debug_images=False,
volume_unit="mm3",
interactivity_loop_finish_fcn=None,
keep_graph_properties=False,
):
"""
Args:
:param img: input data
:param modelparams: parameters of model
:param segparams: segmentation parameters
use_apriori_if_available - set self.apriori to ndimage with same shape as img
            apriori_gamma: influence of apriori information. 0 means no influence, 1.0 is 100% use of
apriori information
boundary_penalties_sigma
            pairwise_alpha_per_square_unit: the pairwise_alpha is rewritten if this parameter is used.
pairwise_alpha = pairwise_alpha_per_square_unit / mean(voxelsize)
return_only_object_with_seeds: Ignore unconnected parts of segmentation, default False
:param voxelsize: size of voxel
:param debug_images: use to show debug images with matplotlib
:param volume_unit: define string of volume unit. Default is "mm3"
        :param keep_graph_properties: Do not delete some useful variables like
msinds, unariesalt, nlinks, temp_msgc_resized_segmentation, temp_msgc_resized_img and
temp_msgc_resized_seeds
Returns:
"""
logger.debug(
"modelparams: %s, segparams: %s, voxelsize: %s, debug_images: %s",
modelparams,
segparams,
voxelsize,
debug_images,
)
if modelparams is None:
modelparams = {}
if segparams is None:
segparams = {}
if voxelsize is None:
voxelsize = [1, 1, 1]
self.voxelsize = np.asarray(voxelsize)
if voxelsize is not None:
self.voxel_volume = np.prod(voxelsize)
else:
self.voxel_volume = None
self._update_segparams(segparams, modelparams)
self.img = img
self.tdata = {}
self.segmentation = None
# self.segparams = segparams
self.seeds = np.zeros(self.img.shape, dtype=np.int8)
self.debug_images = debug_images
self.volume_unit = volume_unit
self.interactivity_counter = 0
self.stats = {"tlinks shape": [], "nlinks shape": []}
self.apriori = None
self.interactivity_loop_finish_funcion = interactivity_loop_finish_fcn
self.keep_temp_properties = keep_graph_properties
self.msinds = None
self.unariesalt2 = None
self.nlinks = None
def _update_segparams(self, segparams={}, modelparams={}):
# default values use_boundary_penalties
# self.segparams = {'pairwiseAlpha':10, 'use_boundary_penalties':False}
self.segparams = {
"method": "graphcut",
"pairwise_alpha": 20,
"use_boundary_penalties": False,
"boundary_penalties_sigma": 200,
"boundary_penalties_weight": 30,
"return_only_object_with_seeds": False,
"use_old_similarity": True, # New similarity not implemented @TODO
"use_extra_features_for_training": False,
"use_apriori_if_available": True,
"apriori_gamma": 0.1,
}
if "modelparams" in segparams.keys():
modelparams = segparams["modelparams"]
self.segparams.update(segparams)
if "pairwise_alpha_per_square_unit" in segparams:
self.segparams["pairwise_alpha"] = self.segparams[
"pairwise_alpha_per_square_unit"
] / np.mean(self.voxel_volume)
self.modelparams = defaultmodelparams.copy()
self.modelparams.update(modelparams)
self.mdl = Model(modelparams=self.modelparams)
def load(self, filename, fv_extern=None):
"""
Read model stored in the file.
:param filename: Path to file with model
:param fv_extern: external feature vector function is passed here
:return:
"""
self.modelparams["mdl_stored_file"] = filename
if fv_extern is not None:
self.modelparams["fv_extern"] = fv_extern
# segparams['modelparams'] = {
# 'mdl_stored_file': mdl_stored_file,
# # 'fv_extern': fv_function
# }
self.mdl = Model(modelparams=self.modelparams)
def interactivity_loop(self, pyed):
        # @TODO it would be worth swapping the mouse buttons. The left one now
        # sets label one, the right one label two. However, the right button is
        # usually used to mark the background, so the foreground comes out as
        # zero and the background as one.
        # This also makes the interactive and non-interactive variants differ.
# import sys
# print "logger ", logging.getLogger().getEffectiveLevel()
# from guppy import hpy
# h = hpy()
# print h.heap()
# import pdb
# logger.debug("obj gc " + str(sys.getsizeof(self)))
self.set_seeds(pyed.getSeeds())
self.run()
pyed.setContours(1 - self.segmentation.astype(np.int8))
# if self.interactivity_loop_finish_funcion is None:
# # TODO remove this statement after lisa package update (12.5.2018)
# from lisa import audiosupport
# self.interactivity_loop_finish_funcion = audiosupport.beep
if self.interactivity_loop_finish_funcion is not None:
self.interactivity_loop_finish_funcion()
self.interactivity_counter += 1
logger.debug("interactivity counter: %s", str(self.interactivity_counter))
def __uniform_npenalty_fcn(self, orig_shape):
return np.ones(orig_shape, dtype=np.int8)
def __ms_npenalty_fcn(self, axis, mask, orig_shape):
"""
:param axis: direction of edge
        :param mask: 3d ndarray with ones where the fine resolution is used
        The neighborhood penalty between small pixels should be smaller than
        between bigger tiles. This is how it is set.
"""
maskz = zoom_to_shape(mask, orig_shape)
maskz_new = np.zeros(orig_shape, dtype=np.int16)
maskz_new[maskz == 0] = self._msgc_npenalty_table[0, axis]
maskz_new[maskz == 1] = self._msgc_npenalty_table[1, axis]
# import sed3
# ed = sed3.sed3(maskz_new)
# import ipdb; ipdb.set_trace() # noqa BREAKPOINT
return maskz_new
def __msgc_step0_init(self):
# table with size 2 * self.img.ndims
# first axis describe whether is the edge between lowres(0) or highres(1) voxels
# second axis describe edge direction (edge axis)
self._msgc_npenalty_table = np.array(
[
[self.segparams["block_size"] * self.segparams["tile_zoom_constant"]]
* self.img.ndim,
[1] * self.img.ndim,
]
)
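        # Illustrative sketch (hypothetical parameter values): for a 3D image
        # with block_size=10 and tile_zoom_constant=1 the table becomes
        #
        #     [[10, 10, 10],   # edges touching low-resolution blocks
        #      [ 1,  1,  1]]   # edges between full-resolution voxels
        #
        # Row 0/1 selects lowres/highres, the column is the edge axis.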
# self.__msgc_npenalty_lowres =
# self.__msgc_npenalty_higres = 1
def __msgc_step12_low_resolution_segmentation(self):
"""
        Compute the low-resolution segmentation (stored in self.segmentation).
        :return: hard_constraints
"""
import scipy
start = self._start_time
# ===== low resolution data processing
# default parameters
        # TODO segparams_lo and segparams_hi are probably unnecessary here
sparams_lo = {
"boundary_dilatation_distance": 2,
"block_size": 6,
"use_boundary_penalties": True,
"boundary_penalties_weight": 1,
"tile_zoom_constant": 1,
}
sparams_lo.update(self.segparams)
sparams_hi = copy.copy(sparams_lo)
# sparams_lo['boundary_penalties_weight'] = (
# sparams_lo['boundary_penalties_weight'] *
# sparams_lo['block_size'])
self.segparams = sparams_lo
self.stats["t1"] = time.time() - start
# step 1: low res GC
hiseeds = self.seeds
# ms_zoom = 4 # 0.125 #self.segparams['scale']
# ms_zoom = self.segparams['block_size']
# loseeds = pyed.getSeeds()
# logger.debug("msc " + str(np.unique(hiseeds)))
loseeds = seed_zoom(hiseeds, self.segparams["block_size"])
hard_constraints = True
self.seeds = loseeds
modelparams_hi = self.modelparams.copy()
# feature vector will be computed from selected voxels
self.modelparams["use_extra_features_for_training"] = True
# TODO what with voxels? It is used from here
# hiseeds and hiimage is used to create intensity model
self.voxels1 = self.img[hiseeds == 1].reshape(-1, 1)
self.voxels2 = self.img[hiseeds == 2].reshape(-1, 1)
        # this is how to compute with the loseeds resolution, but in the wrong way
# self.voxels1 = self.img[self.seeds == 1]
# self.voxels2 = self.img[self.seeds == 2]
# self.voxels1 = pyed.getSeedsVal(1)
# self.voxels2 = pyed.getSeedsVal(2)
img_orig = self.img
# TODO this should be done with resize_to_shape_whith_zoom
zoom = np.asarray(loseeds.shape).astype(np.float) / img_orig.shape
self.img = scipy.ndimage.interpolation.zoom(img_orig, zoom, order=0)
voxelsize_orig = self.voxelsize
logger.debug("zoom %s", zoom)
logger.debug("vs %s", self.voxelsize)
self.voxelsize = self.voxelsize * zoom
# self.img = resize_to_shape_with_zoom(img_orig, loseeds.shape, 1.0 / ms_zoom, order=0)
# this step set the self.segmentation
self.__single_scale_gc_run()
# logger.debug(
# 'segmentation - max: %d min: %d' % (
# np.max(self.segmentation),
# np.min(self.segmentation)
# )
# )
logger.debug(
"segmentation: %s", scipy.stats.describe(self.segmentation, axis=None)
)
self.modelparams = modelparams_hi
self.voxelsize = voxelsize_orig
self.img = img_orig
self.seeds = hiseeds
self.stats["t2"] = time.time() - start
return hard_constraints
def __msgc_step3_discontinuity_localization(self):
"""
Estimate discontinuity in basis of low resolution image segmentation.
:return: discontinuity in low resolution
"""
import scipy
start = self._start_time
seg = 1 - self.segmentation.astype(np.int8)
self.stats["low level object voxels"] = np.sum(seg)
self.stats["low level image voxels"] = np.prod(seg.shape)
# in seg is now stored low resolution segmentation
# back to normal parameters
# step 2: discontinuity localization
# self.segparams = sparams_hi
seg_border = scipy.ndimage.filters.laplace(seg, mode="constant")
logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
# logger.debug(str(np.max(seg_border)))
# logger.debug(str(np.min(seg_border)))
seg_border[seg_border != 0] = 1
logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
# scipy.ndimage.morphology.distance_transform_edt
boundary_dilatation_distance = self.segparams["boundary_dilatation_distance"]
seg = scipy.ndimage.morphology.binary_dilation(
seg_border,
# seg,
np.ones(
[
(boundary_dilatation_distance * 2) + 1,
(boundary_dilatation_distance * 2) + 1,
(boundary_dilatation_distance * 2) + 1,
]
),
)
if self.keep_temp_properties:
self.temp_msgc_lowres_discontinuity = seg
else:
self.temp_msgc_lowres_discontinuity = None
if self.debug_images:
import sed3
pd = sed3.sed3(seg_border) # ), contour=seg)
pd.show()
pd = sed3.sed3(seg) # ), contour=seg)
pd.show()
# segzoom = scipy.ndimage.interpolation.zoom(seg.astype('float'), zoom,
# order=0).astype('int8')
self.stats["t3"] = time.time() - start
return seg
def __msgc_step45678_hi2lo_construct_graph(self, hard_constraints, seg):
# step 4: indexes of new dual graph
hiseeds = self.seeds
msinds, mask_orig = self.__hi2lo_multiscale_indexes(
seg, self.img.shape
) # , ms_zoom)
logger.debug("multiscale inds " + str(msinds.shape))
# if deb:
# import sed3
# pd = sed3.sed3(msinds, contour=seg)
# pd.show()
# intensity values for indexes
# @TODO compute average values for low resolution
ms_img = self.img
# @TODO __ms_create_nlinks , use __ordered_values_by_indexes
# import pdb; pdb.set_trace() # BREAKPOINT
# pyed.setContours(seg)
        # we need to set correct weights between neighboring pixels;
        # this is not a nice hack.
# @TODO reorganise segparams and create_nlinks function
# orig_shape = img_orig.shape
self.stats["t4"] = time.time() - self._start_time
def local_ms_npenalty(x):
return self.__ms_npenalty_fcn(x, seg, self.img.shape)
# here are not unique couples of nodes
nlinks_not_unique = self.__create_nlinks(
ms_img,
msinds,
# boundary_penalties_fcn=ms_npenalty_fcn
boundary_penalties_fcn=local_ms_npenalty,
)
# nlinks created
self.stats["t5"] = time.time() - self._start_time
# get unique set
# remove repetitive link from one pixel to another
nlinks = ms_remove_repetitive_link(nlinks_not_unique)
# now remove cycle link
self.stats["t6"] = time.time() - self._start_time
nlinks = np.array([line for line in nlinks if line[0] != line[1]])
self.stats["t7"] = time.time() - self._start_time
# import ipdb; ipdb.set_trace() # noqa BREAKPOINT
# tlinks - indexes, data_merge
ms_values_lin = self.__ordered_values_by_indexes(self.img, msinds)
seeds = hiseeds
# seeds = pyed.getSeeds()
# if deb:
# import sed3
# se = sed3.sed3(seeds)
# se.show()
ms_seeds_lin = self.__ordered_values_by_indexes(seeds, msinds)
# logger.debug("unique seeds " + str(np.unique(seeds)))
# logger.debug("unique seeds " + str(np.unique(ms_seeds_lin)))
mul_mask, mul_val = self.__msgc_tlinks_area_weight_from_low_segmentation(
mask_orig
)
mul_mask_lin = self.__ordered_values_by_indexes(mul_mask, msinds)
area_weight = 1
        # TODO resolve voxelsize handling
unariesalt = self.__create_tlinks(
ms_values_lin,
voxelsize=self.voxelsize,
# self.voxels1, self.voxels2,
seeds=ms_seeds_lin,
area_weight=area_weight,
hard_constraints=hard_constraints,
mul_mask=mul_mask_lin,
mul_val=mul_val,
)
unariesalt2 = unariesalt.reshape(-1, 2)
self.stats["t8"] = time.time() - self._start_time
return nlinks, unariesalt2, msinds
def __msgc_tlinks_area_weight_from_low_segmentation(self, loseg):
mul_val = (self.segparams["block_size"]) ** 3
# TODO find correct value
# mul_val = 1
logger.debug(
"w: %s, loseg: %s, loseg.shape: %s",
mul_val,
scipy.stats.describe(loseg, axis=None),
loseg.shape,
)
if loseg.shape == self.img.shape:
loseg_resized = loseg
else:
# resize
loseg_resized = zoom_to_shape(loseg, self.img.shape, dtype=np.int8)
pass
# area_weight = loseg_resized.astype(np.int8) * w
mul_mask = ~loseg_resized.astype(np.bool)
return mul_mask, mul_val
def __msgc_step9_finish_perform_gc_and_reshape(self, nlinks, unariesalt2, msinds):
start = self._start_time
# create potts pairwise
# pairwiseAlpha = -10
pairwise = -(np.eye(2) - 1)
pairwise = (self.segparams["pairwise_alpha"] * pairwise).astype(np.int32)
# print 'data shape ', img_orig.shape
# print 'nlinks sh ', nlinks.shape
# print 'tlinks sh ', unariesalt.shape
# print "cut_from_graph"
# print "unaries sh ", unariesalt.reshape(-1,2).shape
# print "nlinks sh", nlinks.shape
self.stats["t9"] = time.time() - start
self.stats["tlinks shape"].append(unariesalt2.shape)
self.stats["nlinks shape"].append(nlinks.shape)
start_gc = time.time()
# Same functionality is in self.seg_data()
result_graph = pygco.cut_from_graph(
nlinks.astype(np.int32),
unariesalt2.astype(np.int32),
pairwise.astype(np.int32),
)
elapsed = time.time() - start_gc
self.stats["gc time"] = elapsed
self.stats["t10"] = time.time() - start
# probably not necessary
# del nlinks
# del unariesalt
# print "unaries %.3g , %.3g" % (np.max(unariesalt),np.min(unariesalt))
# @TODO get back original data
# result_labeling = result_graph.reshape(data.shape)
result_labeling = result_graph[msinds]
# import py3DSeedEditor
# ped = py3DSeedEditor.py3DSeedEditor(result_labeling)
# ped.show()
result_labeling = self.__just_objects_with_seeds_if_required(result_labeling)
self.segmentation = result_labeling
if self.keep_temp_properties:
self.msinds = msinds
self.unariesalt2 = unariesalt2
self.nlinks = nlinks
else:
self.msinds = None
self.unariesalt2 = None
self.nlinks = None
def __multiscale_gc_lo2hi_run(self): # , pyed):
"""
        Run Graph-Cut segmentation with refinement of the low-resolution multiscale graph.
        In the first step, a normal GC is performed on the low-resolution data.
        The second step constructs a finer grid on the edges of the segmentation
        from the first step.
        There is no option for use without `use_boundary_penalties`.
"""
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
self._msgc_lo2hi_resize_init()
self.__msgc_step0_init()
hard_constraints = self.__msgc_step12_low_resolution_segmentation()
# ===== high resolution data processing
seg = self.__msgc_step3_discontinuity_localization()
self.stats["t3.1"] = time.time() - self._start_time
graph = Graph(
seg,
voxelsize=self.voxelsize,
nsplit=self.segparams["block_size"],
edge_weight_table=self._msgc_npenalty_table,
compute_low_nodes_index=True,
)
# graph.run() = graph.generate_base_grid() + graph.split_voxels()
# graph.run()
graph.generate_base_grid()
self.stats["t3.2"] = time.time() - self._start_time
graph.split_voxels()
self.stats["t3.3"] = time.time() - self._start_time
self.stats.update(graph.stats)
self.stats["t4"] = time.time() - self._start_time
# TODO use mul_mask and mul_val
mul_mask, mul_val = self.__msgc_tlinks_area_weight_from_low_segmentation(seg)
area_weight = 1
unariesalt = self.__create_tlinks(
self.img,
self.voxelsize,
self.seeds,
area_weight=area_weight,
hard_constraints=hard_constraints,
mul_mask=None,
mul_val=None,
)
# N-links prepared
self.stats["t5"] = time.time() - self._start_time
un, ind = np.unique(graph.msinds, return_index=True)
self.stats["t6"] = time.time() - self._start_time
self.stats["t7"] = time.time() - self._start_time
unariesalt2_lo2hi = np.hstack(
[unariesalt[ind, 0, 0].reshape(-1, 1), unariesalt[ind, 0, 1].reshape(-1, 1)]
)
nlinks_lo2hi = np.hstack([graph.edges, graph.edges_weights.reshape(-1, 1)])
if self.debug_images:
import sed3
ed = sed3.sed3(unariesalt[:, :, 0].reshape(self.img.shape))
ed.show()
import sed3
ed = sed3.sed3(unariesalt[:, :, 1].reshape(self.img.shape))
ed.show()
# ed = sed3.sed3(seg)
# ed.show()
# import sed3
# ed = sed3.sed3(graph.data)
# ed.show()
# import sed3
# ed = sed3.sed3(graph.msinds)
# ed.show()
# nlinks, unariesalt2, msinds = self.__msgc_step45678_construct_graph(area_weight, hard_constraints, seg)
# self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)
self.__msgc_step9_finish_perform_gc_and_reshape(
nlinks_lo2hi, unariesalt2_lo2hi, graph.msinds
)
self._msgc_lo2hi_resize_clean_finish()
def __multiscale_gc_hi2lo_run(self): # , pyed):
"""
        Run Graph-Cut segmentation with simplifying of the high-resolution multiscale graph.
        In the first step, a normal GC is performed on the low-resolution data.
        The second step constructs a finer grid on the edges of the segmentation
        from the first step.
        There is no option for use without `use_boundary_penalties`.
"""
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
self.__msgc_step0_init()
hard_constraints = self.__msgc_step12_low_resolution_segmentation()
# ===== high resolution data processing
seg = self.__msgc_step3_discontinuity_localization()
nlinks, unariesalt2, msinds = self.__msgc_step45678_hi2lo_construct_graph(
hard_constraints, seg
)
self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)
def __ordered_values_by_indexes(self, data, inds):
"""
Return values (intensities) by indexes.
Used for multiscale graph cut.
data = [[0 1 1],
[0 2 2],
[0 2 2]]
inds = [[0 1 2],
[3 4 4],
[5 4 4]]
return: [0, 1, 1, 0, 2, 0]
If the data are not consistent, it will take the maximal value
"""
# get unique labels and their first indexes
# lab, linds = np.unique(inds, return_index=True)
# compute values by indexes
# values = data.reshape(-1)[linds]
# alternative slow implementation
# if there are different data on same index, it will take
# maximal value
# lab = np.unique(inds)
# values = [0]*len(lab)
# for label in lab:
# values[label] = np.max(data[inds == label])
#
# values = np.asarray(values)
# yet another implementation
values = [None] * (np.max(inds) + 1)
linear_inds = inds.ravel()
linear_data = data.ravel()
for i in range(0, len(linear_inds)):
# going over all data pixels
if values[linear_inds[i]] is None:
                # this index was encountered for the first time
values[linear_inds[i]] = linear_data[i]
elif values[linear_inds[i]] < linear_data[i]:
                # switch between maximal and minimal value here if needed
values[linear_inds[i]] = linear_data[i]
values = np.asarray(values)
return values
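    # --- Illustrative sketch, not part of the original API -----------------
    # A vectorized equivalent of the loop above, assuming `inds` holds
    # non-negative integer labels; np.maximum.at accumulates the per-label
    # maximum in a single pass (unseen labels stay at -inf instead of None).
    def _ordered_values_by_indexes_vectorized(self, data, inds):
        import numpy as np
        values = np.full(np.max(inds) + 1, -np.inf, dtype=np.float64)
        np.maximum.at(values, inds.ravel(), data.ravel())
        return values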
def __hi2lo_multiscale_indexes(self, mask, orig_shape): # , zoom):
"""
        Compute multiscale indexes of an ndarray.
        mask: Says where the original resolution is (0) and where the small
        resolution is (1). The mask itself is in the small resolution.
        orig_shape: Original shape of the input data.
        zoom: Usually a number greater than 1.
result = [[0 1 2],
[3 4 4],
[5 4 4]]
"""
mask_orig = zoom_to_shape(mask, orig_shape, dtype=np.int8)
inds_small = np.arange(mask.size).reshape(mask.shape)
inds_small_in_orig = zoom_to_shape(inds_small, orig_shape, dtype=np.int8)
inds_orig = np.arange(np.prod(orig_shape)).reshape(orig_shape)
# inds_orig = inds_orig * mask_orig
inds_orig += np.max(inds_small_in_orig) + 1
# print 'indexes'
# import py3DSeedEditor as ped
# import pdb; pdb.set_trace() # BREAKPOINT
# '==' is not the same as 'is' for numpy.array
inds_small_in_orig[mask_orig == True] = inds_orig[mask_orig == True] # noqa
inds = inds_small_in_orig
# print np.max(inds)
# print np.min(inds)
inds = relabel_squeeze(inds)
logger.debug(
"Index after relabeling: %s", scipy.stats.describe(inds, axis=None)
)
# logger.debug("Minimal index after relabeling: " + str(np.min(inds)))
# inds_orig[mask_orig==True] = 0
# inds_small_in_orig[mask_orig==False] = 0
# inds = (inds_orig + np.max(inds_small_in_orig) + 1) + inds_small_in_orig
return inds, mask_orig
def interactivity(self, min_val=None, max_val=None, qt_app=None):
"""
Interactive seed setting with 3d seed editor
"""
from .seed_editor_qt import QTSeedEditor
from PyQt5.QtWidgets import QApplication
if min_val is None:
min_val = np.min(self.img)
if max_val is None:
max_val = np.max(self.img)
window_c = (max_val + min_val) / 2 # .astype(np.int16)
window_w = max_val - min_val # .astype(np.int16)
if qt_app is None:
qt_app = QApplication(sys.argv)
pyed = QTSeedEditor(
self.img,
modeFun=self.interactivity_loop,
voxelSize=self.voxelsize,
seeds=self.seeds,
volume_unit=self.volume_unit,
)
pyed.changeC(window_c)
pyed.changeW(window_w)
qt_app.exec_()
def set_seeds(self, seeds):
"""
Function for manual seed setting. Sets variable seeds and prepares
voxels for density model.
:param seeds: ndarray (0 - nothing, 1 - object, 2 - background,
3 - object just hard constraints, no model training, 4 - background
just hard constraints, no model training)
"""
if self.img.shape != seeds.shape:
raise Exception("Seeds must be same size as input image")
self.seeds = seeds.astype("int8")
self.voxels1 = self.img[self.seeds == 1]
self.voxels2 = self.img[self.seeds == 2]
def run(self, run_fit_model=True):
"""
Run the Graph Cut segmentation according to preset parameters.
        :param run_fit_model: Allows skipping the model fit when the model has been prepared beforehand
:return:
"""
if run_fit_model:
self.fit_model(self.img, self.voxelsize, self.seeds)
self._start_time = time.time()
if self.segparams["method"].lower() in ("graphcut", "gc"):
self.__single_scale_gc_run()
elif self.segparams["method"].lower() in (
"multiscale_graphcut",
"multiscale_gc",
"msgc",
"msgc_lo2hi",
"lo2hi",
"multiscale_graphcut_lo2hi",
):
logger.debug("performing multiscale Graph-Cut lo2hi")
self.__multiscale_gc_lo2hi_run()
elif self.segparams["method"].lower() in (
"msgc_hi2lo",
"hi2lo",
"multiscale_graphcut_hi2lo",
):
logger.debug("performing multiscale Graph-Cut hi2lo")
self.__multiscale_gc_hi2lo_run()
else:
logger.error("Unknown segmentation method: " + self.segparams["method"])
def __single_scale_gc_run(self):
res_segm = self._ssgc_prepare_data_and_run_computation(
# self.img,
# self
# self.voxels1, self.voxels2,
# seeds=self.seeds
)
res_segm = self.__just_objects_with_seeds_if_required(res_segm)
self.segmentation = res_segm.astype(np.int8)
def __just_objects_with_seeds_if_required(self, res_segm):
print("return_only_object_with_seeds")
if self.segparams["return_only_object_with_seeds"]:
logger.debug("return_only_object_with_seeds")
try:
                # because of the negative formulation of the problem, the
                # background is segmented as 1 and the foreground as 0
# import thresholding_functions
# newData = thresholding_functions.getPriorityObjects(
# newData = get_priority_objects(
# (1 - res_segm),
# nObj=-1,
# seeds=(self.seeds == 1).nonzero(),
# debug=False
# )
newData = select_objects_by_seeds(1 - res_segm, seeds=self.seeds)
res_segm = 1 - newData
except:
import traceback
logger.warning("Cannot import thresholding_funcions")
traceback.print_exc()
return res_segm
def __set_hard_hard_constraints(self, tdata1, tdata2, seeds):
"""
        It works with seed labels:
        0: nothing
        1: object 1 - full seeds
        2: object 2 - full seeds
        3: object 1 - hard constraints only, not used as training seeds
        4: object 2 - hard constraints only, not used as training seeds
"""
seeds_mask = (seeds == 1) | (seeds == 3)
tdata2[seeds_mask] = np.max(tdata2) + 1
tdata1[seeds_mask] = 0
seeds_mask = (seeds == 2) | (seeds == 4)
tdata1[seeds_mask] = np.max(tdata1) + 1
tdata2[seeds_mask] = 0
return tdata1, tdata2
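    # Worked example (illustrative): for 1-D arrays seeds = [0, 1, 2],
    # tdata1 = [5, 5, 5] and tdata2 = [7, 7, 7], the object seed (label 1)
    # ends up with tdata1 = 0 and tdata2 = 8, while the background seed
    # (label 2) ends up with tdata1 = 6 and tdata2 = 0, forcing the graph
    # cut to honour the seeded voxels.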
def _boundary_penalties_array(self, axis, sigma=None):
import scipy.ndimage.filters as scf
# for axis in range(0,dim):
filtered = scf.prewitt(self.img, axis=axis)
if sigma is None:
sigma2 = np.var(self.img)
else:
sigma2 = sigma ** 2
filtered = np.exp(-np.power(filtered, 2) / (256 * sigma2))
        # values scaled so that they come out between 0 and 100
# cc = 10
# filtered = ((filtered - 1)*cc) + 10
# logger.debug(
# 'ax %.1g max %.3g min %.3g avg %.3g' % (
# axis, np.max(filtered), np.min(filtered), np.mean(filtered))
# )
logger.debug(
"boundary penalties, axis: %s, filtered: %s",
axis,
scipy.stats.describe(filtered, axis=None),
)
#
        # @TODO Check why the formula with exp is not stable
        # Unlike Boykov2001b, there is no division by two here. It is there
        # only to make the values come out nicely, so it was adjusted.
        # The original formula is
        # Bpq = exp( - (Ip - Iq)^2 / (2 * \sigma^2) ) * 1 / dist(p,q)
        # filtered = (-np.power(filtered,2)/(16*sigma))
        # 256 is added here, an empirically found value, so that the result
        # comes out well; it is not put into the exponential because that is
        # numerically unstable.
        # filtered = filtered + 255 # - np.min(filtered2) + 1e-30
        # An exponential should still follow here, but with it the computation
        # is numerically unstable. No idea why.
# if dim >= 1:
        # subtract the same image from itself
# df0 = self.img[:-1,:] - self.img[]
# diffs.insert(0,
return filtered
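    # --- Illustrative sketch, not part of the original API -----------------
    # Textbook Boykov-Jolly boundary term B_pq = exp(-(I_p - I_q)^2 / (2*sigma^2)),
    # computed along one axis with a plain finite difference instead of the
    # Prewitt filter used above; shown only for comparison with the scaled
    # variant implemented in _boundary_penalties_array.
    def _boundary_penalties_reference(self, axis, sigma):
        import numpy as np
        diffs = np.diff(self.img.astype(np.float64), axis=axis)
        return np.exp(-np.power(diffs, 2) / (2.0 * sigma ** 2))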
def _reshape_unariesalt_to_similarity(self, unariesalt, shape):
# if unariesalt is None:
# unariesalt = self.unariesalt2
# if shape is None:
# shape = self.img.shape
# if hasattr(self, "temp_msgc_resized_img"):
# shape = self.temp_msgc_resized_img.shape
tdata1 = unariesalt[..., 0].reshape(shape)
tdata2 = unariesalt[..., 1].reshape(shape)
return tdata1, tdata2
def _debug_show_unariesalt(
self, unariesalt, suptitle=None, slice_number=None, show=True, bins=20
):
shape = self.img.shape
# print("unariesalt dtype ", unariesalt.dtype)
tdata1, tdata2 = self._reshape_unariesalt_to_similarity(unariesalt, shape)
self._debug_show_tdata_images(
tdata1,
tdata2,
suptitle=suptitle,
slice_number=slice_number,
show=show,
bins=bins,
)
def _debug_show_tdata_images(
self, tdata1, tdata2, suptitle=None, slice_number=None, show=True, bins=20
):
# Show model parameters
logger.debug("tdata1 shape %s", str(tdata1.shape))
if slice_number is None:
slice_number = int(tdata1.shape[0] / 2)
try:
import matplotlib.pyplot as plt
fig = plt.figure()
if suptitle is not None:
fig.suptitle(suptitle)
ax = fig.add_subplot(121)
ax.imshow(tdata1[slice_number, :, :])
# plt.colorbar(ax=ax)
# fig = plt.figure()
ax = fig.add_subplot(122)
ax.imshow(tdata2[slice_number, :, :])
# plt.colorbar(ax=ax)
# print('tdata1 max ', np.max(tdata1), ' min ', np.min(tdata1), " dtype ", tdata1.dtype)
# print('tdata2 max ', np.max(tdata2), ' min ', np.min(tdata2), " dtype ", tdata2.dtype)
# logger.debug('tdata1 max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1)) + " dtype " +str( tdata1.dtype))
logger.debug("tdata1: %s", scipy.stats.describe(tdata1, axis=None))
logger.debug("tdata2: %s", scipy.stats.describe(tdata2, axis=None))
# print('tdata2 max ', np.max(tdata2), ' min ', np.min(tdata2), " dtype ", tdata2.dtype)
# # histogram
# fig = plt.figure()
# vx1 = data[seeds==1]
# vx2 = data[seeds==2]
# plt.hist([vx1, vx2], 30)
# plt.hist(voxels2)
except:
import traceback
print(traceback.format_exc())
logger.debug("problem with showing debug images")
try:
fig = plt.figure()
if suptitle is not None:
fig.suptitle(suptitle)
ax = fig.add_subplot(121)
plt.hist(tdata1.flatten(), bins=bins)
ax = fig.add_subplot(122)
plt.hist(tdata2.flatten(), bins=bins)
except:
import traceback
print(traceback.format_exc())
if show:
plt.show()
return fig
def debug_show_model(
self, start=-1000, stop=1000, nsteps=400, suptitle=None, show=True
):
import matplotlib.pyplot as plt
fig = plt.figure()
if suptitle is not None:
fig.suptitle(suptitle)
ax = fig.add_subplot(111)
hstx = np.linspace(start, stop, nsteps)
ax.plot(hstx, np.exp(self.mdl.likelihood_from_image(hstx, self.voxelsize, 1)))
ax.plot(hstx, np.exp(self.mdl.likelihood_from_image(hstx, self.voxelsize, 2)))
if show:
plt.show()
return fig
def fit_model(self, data=None, voxelsize=None, seeds=None):
if data is None:
data = self.img
if voxelsize is None:
voxelsize = self.voxelsize
if seeds is None:
seeds = self.seeds
        # TODO rewrite just for one class and call separately for object and background.
        # TODO rename voxels1 and voxels2
        # voxels1 and voxels2 are used only in this function for multiscale graphcut
        # there can be some
        # It worked well with area_weight = 0.05 and cc = 6, with the differences
        # computed from :-1
# self.mdl.trainFromSomething(data, seeds, 1, self.voxels1)
# self.mdl.trainFromSomething(data, seeds, 2, self.voxels2)
if self.segparams["use_extra_features_for_training"]:
self.mdl.fit(self.voxels1, 1)
self.mdl.fit(self.voxels2, 2)
else:
            self.mdl.fit_from_image(data, voxelsize, seeds, [1, 2])
        # as we convert to int, we need to multiply to get sensible values
def __similarity_for_tlinks_obj_bgr(
self,
data,
voxelsize,
# voxels1, voxels2,
# seeds, otherfeatures=None
):
"""
Compute edge values for graph cut tlinks based on image intensity
and texture.
"""
# self.fit_model(data, voxelsize, seeds)
        # Small values are needed for a good fit
# R(obj) = -ln( Pr (Ip | O) )
# R(bck) = -ln( Pr (Ip | B) )
# Boykov2001b
# ln is computed in likelihood
tdata1 = (-(self.mdl.likelihood_from_image(data, voxelsize, 1))) * 10
tdata2 = (-(self.mdl.likelihood_from_image(data, voxelsize, 2))) * 10
# to spare some memory
dtype = np.int16
if np.any(tdata1 > 32760):
dtype = np.float32
if np.any(tdata2 > 32760):
dtype = np.float32
if self.segparams["use_apriori_if_available"] and self.apriori is not None:
logger.debug("using apriori information")
gamma = self.segparams["apriori_gamma"]
a1 = (-np.log(self.apriori * 0.998 + 0.001)) * 10
a2 = (-np.log(0.999 - (self.apriori * 0.998))) * 10
# logger.debug('max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1)))
# logger.debug('max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2)))
# logger.debug('max ' + str(np.max(a1)) + ' min ' + str(np.min(a1)))
# logger.debug('max ' + str(np.max(a2)) + ' min ' + str(np.min(a2)))
tdata1u = (((1 - gamma) * tdata1) + (gamma * a1)).astype(dtype)
tdata2u = (((1 - gamma) * tdata2) + (gamma * a2)).astype(dtype)
tdata1 = tdata1u
tdata2 = tdata2u
# logger.debug(' max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1)))
# logger.debug(' max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2)))
# logger.debug('gamma ' + str(gamma))
# import sed3
# ed = sed3.show_slices(tdata1)
# ed = sed3.show_slices(tdata2)
del tdata1u
del tdata2u
del a1
del a2
# if np.any(tdata1 < 0) or np.any(tdata2 <0):
# logger.error("Problem with tlinks. Likelihood is < 0")
# if self.debug_images:
# self.__show_debug_tdata_images(tdata1, tdata2, suptitle="likelihood")
return tdata1, tdata2
def __limit(self, tdata1, min_limit=0, max_error=10, max_limit=20000):
# logger.debug('before limit max ' + np.max(tdata1), 'min ' + np.min(tdata1) + " dtype " + tdata1.dtype)
tdata1[tdata1 > max_limit] = max_limit
tdata1[tdata1 < min_limit] = min_limit
# tdata1 = models.softplus(tdata1, max_error=max_error, keep_dtype=True)
# replace inf with large finite number
# tdata1 = np.nan_to_num(tdata1)
return tdata1
def __limit_tlinks(self, tdata1, tdata2):
tdata1 = self.__limit(tdata1)
tdata2 = self.__limit(tdata2)
return tdata1, tdata2
def __create_tlinks(
self,
data,
voxelsize,
# voxels1, voxels2,
seeds,
area_weight,
hard_constraints,
mul_mask=None,
mul_val=None,
):
tdata1, tdata2 = self.__similarity_for_tlinks_obj_bgr(
data,
voxelsize,
# voxels1, voxels2,
# seeds
)
# logger.debug('tdata1 min %f , max %f' % (tdata1.min(), tdata1.max()))
# logger.debug('tdata2 min %f , max %f' % (tdata2.min(), tdata2.max()))
if hard_constraints:
            if isinstance(seeds, bool):
raise Exception(
"Seeds variable not set",
"There is need set seed if you want use hard constraints",
)
tdata1, tdata2 = self.__set_hard_hard_constraints(tdata1, tdata2, seeds)
limit = 20000
        # careful multiplication so that the values stay below the limit
if mul_mask is not None:
divided_limit = limit / mul_val
mm = tdata1 > divided_limit
tdata1[mul_mask & mm] = limit
tdata1[mul_mask & ~mm] *= mul_val
mm = tdata2 > divided_limit
tdata2[mul_mask & mm] = limit
tdata2[mul_mask & ~mm] *= mul_val
# if not np.isscalar(area_weight):
# area_weight = area_weight.reshape(tdata1.shape)
tdata1 = self.__limit(tdata1)
tdata2 = self.__limit(tdata2)
unariesalt = (
0
+ (
np.dstack(
area_weight * [tdata1.reshape(-1, 1), tdata2.reshape(-1, 1)]
).copy("C")
)
).astype(np.int32)
unariesalt = self.__limit(unariesalt)
# if self.debug_images:
# self.__show_debug_(unariesalt, suptitle="after weighing and limitation")
return unariesalt
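    # Shape note (illustrative): with the default area_weight of 1 and an image
    # with N voxels, the returned unariesalt array has shape (N, 1, 2);
    # unariesalt[:, 0, 0] holds the class-1 (object) t-link weights and
    # unariesalt[:, 0, 1] the class-2 (background) ones.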
def __create_nlinks(self, data, inds=None, boundary_penalties_fcn=None):
"""
        Compute the nlinks grid from the data shape information. For the
        boundary penalties, the data (intensity) values are used.
        inds: Default is None. Used for multiscale GC. These are the indexes of
        multiscale pixels. The next example shows one superpixel with index 2.
        inds = [
            [1 2 2],
            [3 2 2],
            [4 5 6]]
        boundary_penalties_fcn: a function with one argument - axis. It can
            be used for setting penalty weights between neighboring pixels.
"""
        # use the general graph algorithm
# first, we construct the grid graph
start = time.time()
if inds is None:
inds = np.arange(data.size).reshape(data.shape)
# if not self.segparams['use_boundary_penalties'] and \
# boundary_penalties_fcn is None :
if boundary_penalties_fcn is None:
edgs_arr = self._prepare_edgs_arr_with_no_fcn(inds)
else:
logger.info("use_boundary_penalties")
edgs_arr = self._prepare_edgs_arr_from_boundary_fcn(inds, boundary_penalties_fcn)
# import pdb; pdb.set_trace()
edges = np.vstack(edgs_arr).astype(np.int32)
        # edges - list of index pairs of nodes that are neighbors
elapsed = time.time() - start
self.stats["_create_nlinks time"] = elapsed
logger.info("__create nlinks time " + str(elapsed))
return edges
def _prepare_edgs_arr_with_no_fcn(self, inds):
if self.img.ndim == 3:
# This is faster for some specific format
edgx = np.c_[inds[:, :, :-1].ravel(), inds[:, :, 1:].ravel()]
edgy = np.c_[inds[:, :-1, :].ravel(), inds[:, 1:, :].ravel()]
edgz = np.c_[inds[:-1, :, :].ravel(), inds[1:, :, :].ravel()]
edgs_arr = [edgx, edgy, edgz]
elif self.img.ndim == 2:
edgx = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]
edgy = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]
edgs_arr = [edgx, edgy]
else:
logger.error(f"Input data dimension {self.img.ndim} is no supported")
edgs_arr = None
return edgs_arr
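    # Worked example (illustrative): for a 2x2 image with
    # inds = [[0, 1], [2, 3]], the 2-D branch above yields
    # edgx = [[0, 1], [2, 3]] (horizontal neighbours) and
    # edgy = [[0, 2], [1, 3]] (vertical neighbours).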
def _prepare_edgs_arr_from_boundary_fcn(self, inds, boundary_penalties_fcn):
"""
        Prepare a list of edges for each axis.
:param boundary_penalties_fcn: function working with intensities
:param inds:
:return:
"""
bpw = self.segparams["boundary_penalties_weight"]
if self.img.ndim == 3:
bpa = boundary_penalties_fcn(2)
# id1=inds[:, :, :-1].ravel()
edgx = np.c_[
inds[:, :, :-1].ravel(),
inds[:, :, 1:].ravel(),
# cc * np.ones(id1.shape)
bpw * bpa[:, :, 1:].ravel(),
]
bpa = boundary_penalties_fcn(1)
# id1 =inds[:, 1:, :].ravel()
edgy = np.c_[
inds[:, :-1, :].ravel(),
inds[:, 1:, :].ravel(),
# cc * np.ones(id1.shape)]
bpw * bpa[:, 1:, :].ravel(),
]
bpa = boundary_penalties_fcn(0)
# id1 = inds[1:, :, :].ravel()
edgz = np.c_[
inds[:-1, :, :].ravel(),
inds[1:, :, :].ravel(),
# cc * np.ones(id1.shape)]
bpw * bpa[1:, :, :].ravel(),
]
edgs_arr = [edgx, edgy, edgz]
elif self.img.ndim == 2:
bpa = boundary_penalties_fcn(1)
# id1=inds[:, :, :-1].ravel()
edgx = np.c_[
inds[:, :-1].ravel(),
inds[:, 1:].ravel(),
# cc * np.ones(id1.shape)
bpw * bpa[:, 1:].ravel(),
]
bpa = boundary_penalties_fcn(0)
# id1 =inds[:, 1:, :].ravel()
edgy = np.c_[
inds[:-1, :].ravel(),
inds[1:, :].ravel(),
# cc * np.ones(id1.shape)]
bpw * bpa[1:, :].ravel(),
]
edgs_arr = [edgx, edgy]
else:
logger.error(f"Input data dimension {self.img.ndim} is no supported")
edgs_arr = None
return edgs_arr
def debug_get_reconstructed_similarity(
self,
data3d=None,
voxelsize=None,
seeds=None,
area_weight=1,
hard_constraints=True,
return_unariesalt=False,
):
"""
        Use the current model to calculate the similarity. If no input is given, the last image is used.
:param data3d:
:param voxelsize:
:param seeds:
:param area_weight:
:param hard_constraints:
:param return_unariesalt:
:return:
"""
if data3d is None:
data3d = self.img
if voxelsize is None:
voxelsize = self.voxelsize
if seeds is None:
seeds = self.seeds
unariesalt = self.__create_tlinks(
data3d,
voxelsize,
# voxels1, voxels2,
seeds,
area_weight,
hard_constraints,
)
if return_unariesalt:
return unariesalt
else:
return self._reshape_unariesalt_to_similarity(unariesalt, data3d.shape)
def debug_show_reconstructed_similarity(
self,
data3d=None,
voxelsize=None,
seeds=None,
area_weight=1,
hard_constraints=True,
show=True,
bins=20,
slice_number=None,
):
"""
Show tlinks.
:param data3d: ndarray with input data
:param voxelsize:
:param seeds:
:param area_weight:
:param hard_constraints:
:param show:
:param bins: histogram bins number
:param slice_number:
:return:
"""
unariesalt = self.debug_get_reconstructed_similarity(
data3d,
voxelsize=voxelsize,
seeds=seeds,
area_weight=area_weight,
hard_constraints=hard_constraints,
return_unariesalt=True,
)
self._debug_show_unariesalt(
unariesalt, show=show, bins=bins, slice_number=slice_number
)
def debug_inspect_node(self, node_msindex):
"""
Get info about the node. See pycut.inspect_node() for details.
        Processing is done in the temporary (resized) shape.
        :param node_msindex: multiscale index of the selected node
:return: node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds
"""
return inspect_node(self.nlinks, self.unariesalt2, self.msinds, node_msindex)
def debug_get_node_msindex(self, node_seeds):
"""
Get multiscale index of voxel selected by seeds.
:param node_seeds: ndarray
:return int with multiscale index
"""
node_msindex = get_node_msindex(self.msinds, node_seeds)
return node_msindex
def debug_interactive_inspect_node(self):
"""
        Call after segmentation to see the selected node's neighborhood.
        The user has to select one node by clicking.
:return:
"""
if (
np.sum(
np.abs(
np.asarray(self.msinds.shape) - np.asarray(self.segmentation.shape)
)
)
== 0
):
segmentation = self.segmentation
else:
segmentation = self.temp_msgc_resized_segmentation
logger.info("Click to select one voxel of interest")
import sed3
ed = sed3.sed3(self.msinds, contour=segmentation == 0)
ed.show()
edseeds = ed.seeds
node_msindex = get_node_msindex(self.msinds, edseeds)
node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds = self.debug_inspect_node(
node_msindex
)
import sed3
ed = sed3.sed3(
self.msinds, contour=segmentation == 0, seeds=node_neighboor_seeds
)
ed.show()
return (
node_unariesalt,
node_neighboor_edges_and_weights,
node_neighboor_seeds,
node_msindex,
)
def debug_reconstruct_nlinks_max(self):
"""
        Show the maximum weight between neighboring pixels.
        It should differ between the high-resolution and low-resolution areas.
        In the low-resolution area it should be k-times higher than in the
        high-resolution area, where k is the tile size.
:return:
"""
return reconstruct_nlinks_max(self.nlinks, self.msinds)
def _ssgc_prepare_data_and_run_computation(
self,
# voxels1, voxels2,
hard_constraints=True,
area_weight=1,
):
"""
        Set up the data.
        You need to set seeds if you want to use hard_constraints.
"""
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import pdb; pdb.set_trace() # BREAKPOINT
unariesalt = self.__create_tlinks(
self.img,
self.voxelsize,
# voxels1, voxels2,
self.seeds,
area_weight,
hard_constraints,
)
        # in some organ segmentation tests the unaries reach -15, which is odd;
        # putting a print before the if makes it visible
logger.debug("unaries %.3g , %.3g" % (np.max(unariesalt), np.min(unariesalt)))
# create potts pairwise
# pairwiseAlpha = -10
pairwise = -(np.eye(2) - 1)
pairwise = (self.segparams["pairwise_alpha"] * pairwise).astype(np.int32)
# pairwise = np.array([[0,30],[30,0]]).astype(np.int32)
# print pairwise
self.iparams = {}
if self.segparams["use_boundary_penalties"]:
sigma = self.segparams["boundary_penalties_sigma"]
# set boundary penalties function
# Default are penalties based on intensity differences
boundary_penalties_fcn = lambda ax: self._boundary_penalties_array(
axis=ax, sigma=sigma
)
else:
boundary_penalties_fcn = None
nlinks = self.__create_nlinks(
self.img, boundary_penalties_fcn=boundary_penalties_fcn
)
self.stats["tlinks shape"].append(unariesalt.reshape(-1, 2).shape)
self.stats["nlinks shape"].append(nlinks.shape)
# we flatten the unaries
# result_graph = cut_from_graph(nlinks, unaries.reshape(-1, 2),
# pairwise)
start = time.time()
if self.debug_images:
self._debug_show_unariesalt(unariesalt)
result_graph = pygco.cut_from_graph(nlinks, unariesalt.reshape(-1, 2), pairwise)
elapsed = time.time() - start
self.stats["gc time"] = elapsed
result_labeling = result_graph.reshape(self.img.shape)
return result_labeling
def _msgc_lo2hi_resize_init(self):
self._lo2hi_resize_original_shape = self.img.shape
new_shape = (
np.ceil(np.asarray(self.img.shape) / float(self.segparams["block_size"]))
* self.segparams["block_size"]
).astype(np.int)
crinfo = list(zip([0] * self.img.ndim, self.img.shape))
self.img = uncrop(self.img, crinfo, new_shape, outside_mode="nearest")
self.seeds = uncrop(self.seeds, crinfo, new_shape)
def _msgc_lo2hi_resize_clean_finish(self):
orig_shape = self._lo2hi_resize_original_shape
self.temp_msgc_resized_img = self.img
self.temp_msgc_resized_segmentation = self.segmentation
self.temp_msgc_resized_seeds = self.seeds
self.img = self.temp_msgc_resized_img[
: orig_shape[0], : orig_shape[1], : orig_shape[2]
]
self.segmentation = self.temp_msgc_resized_segmentation[
: orig_shape[0], : orig_shape[1], : orig_shape[2]
]
self.seeds = self.temp_msgc_resized_seeds[
: orig_shape[0], : orig_shape[1], : orig_shape[2]
]
if not self.keep_temp_properties:
del self.temp_msgc_resized_segmentation
del self.temp_msgc_resized_img
del self.temp_msgc_resized_seeds
del self._lo2hi_resize_original_shape
# self.stats["t11"] = time.time() - self._start_time
def save(self, filename):
"""
Save model to file
:param filename: Path to file
:return:
"""
self.mdl.save(filename)
def ms_remove_repetitive_link(nlinks_not_unique):
# nlinks = np.array(
# [list(x) for x in set(tuple(x) for x in nlinks_not_unique)]
# )
a = nlinks_not_unique
nlinks = (
np.unique(a.view(np.dtype((np.void, a.dtype.itemsize * a.shape[1]))))
.view(a.dtype)
.reshape(-1, a.shape[1])
)
return nlinks
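# Illustrative alternative, not part of the original API: on NumPy >= 1.13 the
# void-view trick above can be replaced by a row-wise unique call.
def ms_remove_repetitive_link_rows(nlinks_not_unique):
    import numpy as np
    return np.unique(nlinks_not_unique, axis=0)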
def get_neighborhood_edes(nlinks, node_msindex):
node_neighbor_edges = np.vstack(
[
nlinks[np.where(nlinks[:, 0] == node_msindex)],
nlinks[np.where(nlinks[:, 1] == node_msindex)],
]
)
return node_neighbor_edges
def inspect_node_neighborhood(nlinks, msinds, node_msindex):
"""
Get information about one node in graph
    :param nlinks: neighborhood edges
:param msinds: indexes in 3d image
:param node_msindex: int, multiscale index of selected voxel
:return: node_neighboor_edges_and_weights, node_neighboor_seeds
"""
# seed_indexes = np.nonzero(node_seed)
# selected_inds = msinds[seed_indexes]
# node_msindex = selected_inds[0]
node_neighbor_edges = get_neighborhood_edes(nlinks, node_msindex)
node_neighbor_seeds = np.zeros_like(msinds, dtype=np.int8)
for neighboor_ind in np.unique(node_neighbor_edges[:, :2].ravel()):
node_neighbor_ind = np.where(msinds == neighboor_ind)
node_neighbor_seeds[node_neighbor_ind] = 2
node_neighbor_seeds[np.where(msinds == node_msindex)] = 1
# node_coordinates = np.unravel_index(selected_voxel_ind, msinds.shape)
# node_neighbor_coordinates = np.unravel_index(np.unique(node_neighbor_edges[:, :2].ravel()), msinds.shape)
return node_neighbor_edges, node_neighbor_seeds
def inspect_node(nlinks, unariesalt, msinds, node_msindex):
"""
Get information about one node in graph
    :param nlinks: neighborhood edges
:param unariesalt: weights
:param msinds: indexes in 3d image
:param node_msindex: msindex of selected node. See get_node_msindex()
:return: node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds
"""
node_unariesalt = unariesalt[node_msindex]
neigh_edges, neigh_seeds = inspect_node_neighborhood(nlinks, msinds, node_msindex)
return node_unariesalt, neigh_edges, neigh_seeds
def get_node_msindex(msinds, node_seed):
"""
Convert seeds-like selection of voxel to multiscale index.
:param msinds: ndarray with indexes
:param node_seed: ndarray with 1 where selected pixel is, or list of indexes in this array
:return: multiscale index of first found seed
"""
if type(node_seed) == np.ndarray:
seed_indexes = np.nonzero(node_seed)
elif type(node_seed) == list:
seed_indexes = node_seed
else:
seed_indexes = [node_seed]
selected_nodes_msinds = msinds[seed_indexes]
node_msindex = selected_nodes_msinds[0]
return node_msindex
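# Usage sketch (illustrative): with `msinds` of shape (Z, Y, X) and a seed
# array `sel` of the same shape containing a single 1, the call
# get_node_msindex(msinds, sel) returns the multiscale index stored in msinds
# at that position.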
def reconstruct_nlinks_max(nlinks, msinds):
nlinks_max = np.zeros_like(msinds, dtype=nlinks.dtype)
for node_msindex in np.unique(msinds):
edges = get_neighborhood_edes(nlinks, node_msindex)
nlinks_max[msinds == node_msindex] = np.max(edges[:, 2])
return nlinks_max
# class Tests(unittest.TestCase):
# def setUp(self):
# pass
# def test_segmentation(self):
# data_shp = [16,16,16]
# data = generate_data(data_shp)
# seeds = np.zeros(data_shp)
# setting background seeds
# seeds[:,0,0] = 1
# seeds[6,8:-5,2] = 2
# x[4:-4, 6:-2, 1:-6] = -1
# igc = ImageGraphCut(data)
# igc.interactivity()
# instead of interactivity just set seeds
# igc.noninteractivity(seeds)
# instead of showing just test results
# igc.show_segmentation()
# segmentation = igc.segmentation
# Testing some pixels of the result
# self.assertTrue(segmentation[0, 0, -1] == 0)
# self.assertTrue(segmentation[7, 9, 3] == 1)
# self.assertTrue(np.sum(segmentation) > 10)
# pdb.set_trace()
# self.assertTrue(True)
# logger.debug(igc.segmentation.shape)
usage = "%prog [options]\n" + __doc__.rstrip()
help = {
"in_file": 'input *.mat file with "data" field',
"out_file": "store the output matrix to the file",
"debug": "debug mode",
"debug_interactivity": "turn on interactive debug mode",
"test": "run unit test",
}
def relabel_squeeze(data):
""" Makes relabeling of data if there are unused values. """
palette, index = np.unique(data, return_inverse=True)
data = index.reshape(data.shape)
    # really slow solution
# unq = np.unique(data)
# actual_label = 0
# for lab in unq:
# data[data == lab] = actual_label
# actual_label += 1
    # yet another solution, probably slower
# arr = data
# data = (np.digitize(arr.reshape(-1,),np.unique(arr))-1).reshape(arr.shape)
return data
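# Worked example (illustrative): relabel_squeeze(np.array([0, 2, 2, 5]))
# returns array([0, 1, 1, 2]); np.unique(..., return_inverse=True) maps the
# sorted unique values onto consecutive labels starting at 0.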
# @profile
def main():
# logger = logging.getLogger(__file__)
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
logging.basicConfig(format="%(message)s")
ch = logging.StreamHandler()
formatter = logging.Formatter(
"%(levelname)-5s [%(module)s:%(funcName)s:%(lineno)d] %(message)s"
)
ch.setFormatter(formatter)
logger.addHandler(ch)
# parser = OptionParser(description='Organ segmentation')
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-d", "--debug", action="store_true", help=help["debug"])
parser.add_argument(
"-di",
"--debug-interactivity",
action="store_true",
help=help["debug_interactivity"],
)
parser.add_argument(
"-i", "--input-file", action="store", default=None, help=help["in_file"]
)
parser.add_argument("-t", "--tests", action="store_true", help=help["test"])
parser.add_argument(
"-o",
"--outputfile",
action="store",
dest="out_filename",
default="output.mat",
help=help["out_file"],
)
# (options, args) = parser.parse_args()
options = parser.parse_args()
debug_images = False
if options.debug:
logger.setLevel(logging.DEBUG)
# print DEBUG
# DEBUG = True
if options.debug_interactivity:
debug_images = True
# if options.tests:
# sys.argv[1:]=[]
# unittest.main()
if options.input_file is None:
raise IOError("No input data!")
else:
dataraw = loadmat(options.input_file, variable_names=["data", "voxelsize_mm"])
# import pdb; pdb.set_trace() # BREAKPOINT
logger.debug("\nvoxelsize_mm " + dataraw["voxelsize_mm"].__str__())
if sys.platform == "win32":
        # hack: on Windows the voxelsize is read as a 2D array like [[1, 0.5, 0.5]]
dataraw["voxelsize_mm"] = dataraw["voxelsize_mm"][0]
igc = ImageGraphCut(
dataraw["data"],
voxelsize=dataraw["voxelsize_mm"],
debug_images=debug_images # noqa
# , modelparams={'type': 'gaussian_kde', 'params': []}
# , modelparams={'type':'kernel', 'params':[]} #noqa not in old scipy
# , modelparams={'type':'gmmsame', 'params':{'cvtype':'full', 'n_components':3}} # noqa 3 components
# , segparams={'type': 'multiscale_gc'} # multisc gc
,
segparams={"method": "multiscale_graphcut"} # multisc gc
# , modelparams={'fv_type': 'fv001'}
# , modelparams={'type': 'dpgmm', 'params': {'cvtype': 'full', 'n_components': 5, 'alpha': 10}} # noqa 3 components
)
igc.interactivity()
logger.debug("igc interactivity countr: %s", igc.interactivity_counter)
logger.debug(igc.segmentation.shape)
if __name__ == "__main__":
main()
| bsd-3-clause |
PatrickOReilly/scikit-learn | sklearn/feature_extraction/hashing.py | 74 | 6153 | # Author: Lars Buitinck
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
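# Usage sketch (illustrative, not part of the scikit-learn source): hashing
# token streams with input_type="string", where every token counts as 1.
# >>> h = FeatureHasher(n_features=8, input_type="string")
# >>> X = h.transform([["cat", "dog", "cat"], ["bird"]])
# >>> X.shape
# (2, 8)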
| bsd-3-clause |
dlenski/top500 | top500_plot.py | 1 | 8180 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, json
from itertools import cycle, product
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt, dates as mpld, use
plt.rcParams['font.size']=20
plt.rcParams['svg.fonttype'] = 'none' # don't embed or render font (https://stackoverflow.com/a/35734729)
#pl.rcParams['legend.fontsize']*=1.1
#pl.rcParams['xtick.labelsize']*=1.25
#pl.rcParams['ytick.labelsize']*=1.25
plt.rcParams['legend.fontsize']='x-small'
##########################
# get the data
df = pd.read_csv('TOP500_history.csv', low_memory=False, parse_dates={'Date': ['Year','Month','Day']})
assert (df.groupby(('Date',)).size()==500).all()
# Make mostly-coherent processor family and vendor columns
def remap(procfam):
if procfam in ('Intel EM64T','Intel Nehalem','Intel Westmere','Intel SandyBridge','Intel IvyBridge','Intel Haswell','Intel Core','Intel Broadwell','Intel Skylake','Intel Cascade Lake','Intel Cascade lake','AMD x86_64','AMD Zen (Naples)','AMD Zen-2 (Rome)'):
i,v='x86-64', procfam.split()[0]
elif procfam in ('Intel MIC','Intel Xeon Phi'):
i,v='Xeon Phi','Intel'
elif procfam in ('POWER','Power','PowerPC'):
i=v='POWER'
elif procfam == 'Intel IA-64':
i,v='Itanium', 'Intel'
elif procfam in ('Intel IA-32','AMD'):
i,v='x86-32', procfam.split()[0]
else:
i,v=procfam, procfam
return pd.Series((i,v))
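# Example (illustrative): remap('Intel Haswell') returns ('x86-64', 'Intel')
# and remap('POWER') returns ('POWER', 'POWER'), each as a pandas Series.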
procfam = df['Processor Family'].where(df['Processor Family'].notna(), df['Processor Technology'])
df[['ISA','Vendor']] = procfam.apply(remap)
# get country codes
for f, t in (('Saudia Arabia', 'Saudi Arabia'), # typo in TOP500 sources
('Korea, South', 'South Korea'), # Match country-en.csv
('Czech Republic', 'Czechia'), # Match country-en.csv
('Slovak Republic', 'Slovakia'), # Match country-en.csv
('Hong Kong', 'Hong Kong SAR China')): # Match country-en.csv
df['Country'].replace(f, t, inplace=True)
dfc_en = pd.read_csv('country-en.csv')
dfc_en.columns = ('CountryISO', 'Country')
df = df.merge(dfc_en, on='Country', how='left')
assert (df['CountryISO'].isnull()==False).all()
# get localized labels and countries
loclabels = json.load(open('labels-i18n.json'))
countries = None
for lang in loclabels:
clang = pd.read_csv('country-%s.csv'%lang, encoding='utf-8')
clang.columns = ('CountryISO', lang)
if countries is None:
countries = clang
else:
countries = countries.merge(clang, on='CountryISO')
countries.set_index('CountryISO', inplace=True)
assert (df.groupby(('Date',)).size()==500).all()
##########################
# Find what set of countries (sorted by weight) account for most of the total counts
country_by_date = df.groupby(('Date','CountryISO')).size()
country_wt = country_by_date.sum(level='CountryISO').sort_values(ascending=False).to_frame('sum')
#country_wt['sum'] = country_by_date.sum(level='Country')
cutoff = country_wt['sum'].cumsum() > 0.90*country_wt['sum'].sum()
#country_by_date = country_by_date.reset_index()
#country_by_date = country_by_date.groupby(('Date','Country')).sum()
country_by_date = country_by_date.unstack() # pivot Country from row to column index
country_by_date = country_by_date.fillna(0) # fill in missing values (e.g. x86_64 in 1993 ;-))
major_minor_countries = [ country_by_date.reindex(columns=country_wt.index[cutoff==polarity])
for polarity in (False,True) ]
# plot it
for lang, langlabels in loclabels.items():
colors = cycle( list('bcgmry') )
hatches = cycle(('/', '*', '\\', 'o', 'x', 'O', '.'))
print("Plotting TOP500 systems by country (%s)..." % lang)
fig = plt.figure(figsize=(14,10))
sharex = None
patches, labels = [], []
dates = country_by_date.index
for pos, cbd in enumerate(major_minor_countries):
plt.subplot(2, 1, 2-pos, sharex=sharex)
sharex = ax = fig.gca()
edge = 0
bottom = None
for pp, ser in cbd.items():
hatch = next(hatches)
facecolor = next(colors)
label = countries.loc[pp,lang]
ax.fill_between(dates, edge, edge+ser, edgecolor='k', facecolor=facecolor, hatch=hatch, label=label)
ax.xaxis.set_major_formatter(mpld.DateFormatter("%Y")) #"’%y"))
ax.xaxis.set_major_locator(mpld.YearLocator())
ax.xaxis.set_minor_locator(mpld.YearLocator(month=7))
plt.xticks(rotation='60')
patches.append( plt.Rectangle((0,0), 2, 2, edgecolor='k', facecolor=facecolor, hatch=hatch) )
labels.append(label)
edge += ser
# show legend and labels
plt.ylabel(langlabels['nsys'])
plt.ylim(bottom, min(500, edge.max() + 0.1*edge.ptp()))
if pos==0:
plt.xlabel(langlabels['date'])
elif pos==1:
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_xlabel(), visible=False)
plt.title(langlabels['by_country'])
plt.legend(patches, labels, loc='upper left', bbox_to_anchor=(1.02, 1), handleheight=1.2, handlelength=3, ncol=2)
plt.subplots_adjust(left=.08, top=.92, bottom=0.12, right=0.6, hspace=0.02)
plt.xlim(dates.min(), dates.max())
plt.savefig("Countries_with_TOP500_supercomputers_%s.png"%lang, bbox_inches='tight')
plt.savefig("Countries_with_TOP500_supercomputers_%s.svg"%lang, bbox_inches='tight')
##########################
# Processor families by date, sorted by weight of ISA then by Vendor
proc_counts = df.groupby(('ISA','Vendor','Date')).size()
proc_by_date = proc_counts.unstack(level=(0,1)).fillna(0) # pivot ISA,Vendor from row to column index
proc_wt = proc_by_date.sum().to_frame() # weight (ISA,Vendor) across all dates
ISA_wt = proc_wt.sum(level='ISA') # weight by (ISA) across all dates
ISA_wt.columns = [1]
proc_wt = proc_wt.join( ISA_wt.reindex(proc_wt.index, level='ISA') )
proc_wt.sort_values([1,0], ascending=(False,False), inplace=True)
proc_by_date = proc_by_date.reindex(columns=proc_wt.index)
# plot it
for lang, langlabels in loclabels.items():
colors = cycle( list('bcgmry') )
hatches = cycle(('/', '*', '\\', 'o', 'x', 'O', '.'))
print("Plotting TOP500 systems by process family (%s)..." % lang)
fig = plt.figure(figsize=(14,10))
patches, labels = [], []
dates = proc_by_date.index
edge = 0
pplast = facecolor = bottom = None
for pp, ser in proc_by_date.items():
#print ser.shape, edge.shape, dates.shape
if isinstance(pp, str): pp=pp,
if pplast is None or pp[0]!=pplast[0]:
hatch = next(hatches)
if pplast is None or len(pp)<2 or pp[1]!=pplast[1]:
facecolor = next(colors)
label = ("%s (%s)"%pp if pp[0]!=pp[1] else pp[0])
ax = fig.gca()
ax.fill_between(dates, edge, edge+ser, edgecolor='k', facecolor=facecolor, hatch=hatch, label=label)
ax.xaxis.set_major_formatter(mpld.DateFormatter("%Y")) #"’%y"))
ax.xaxis.set_major_locator(mpld.YearLocator(month=6))
ax.xaxis.set_minor_locator(mpld.YearLocator(month=11))
plt.xticks(rotation=60)
patches.append( plt.Rectangle((0,0), 2, 2, edgecolor='k', facecolor=facecolor, hatch=hatch) )
labels.append(label)
edge += ser
pplast = pp
if bottom is None:
bottom = max(0, edge.min() - 0.1*edge.ptp())
# show legend and labels
plt.legend(patches, labels, loc='upper left', bbox_to_anchor=(1.02, 1), handleheight=1, handlelength=4)
plt.subplots_adjust(left=.08, top=.92, bottom=0.12, right=0.75)
plt.xlabel(langlabels['date'])
plt.ylabel(langlabels['nsys'])
plt.title(langlabels['by_procfam'])
plt.xlim(dates.min(), dates.max())#+pd.datetools.relativedelta(months=6))
plt.ylim(bottom, min(500, edge.max() + 0.1*edge.ptp()))
plt.savefig("Processor_families_in_TOP500_supercomputers_%s.png"%lang, bbox_inches='tight')
plt.savefig("Processor_families_in_TOP500_supercomputers_%s.svg"%lang, bbox_inches='tight')
#plt.show()
| gpl-3.0 |
PatrickChrist/scikit-learn | sklearn/externals/joblib/__init__.py | 86 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
  over, for instance when prototyping computation-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
   display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
ZENGXH/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/opus_core/indicator_framework/core/indicator_data_manager.py | 2 | 12819 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import re
from opus_core.configurations.dataset_pool_configuration import DatasetPoolConfiguration
from opus_core.indicator_framework.core.source_data import SourceData
from opus_core.logger import logger
class IndicatorDataManager:
def export_indicator(self, indicator, source_data, year = None):
self._export_indicator_to_file(indicator, source_data, year)
def _export_indicator_to_file(self, indicator, source_data, year):
VERSION = 1.0
'''Writes to a file information about this indicator'''
if not indicator.write_to_file:
return
lines = []
class_name = indicator.__class__.__name__
lines.append('<version>%.1f</version>'%VERSION)
lines.append('<%s>'%class_name)
basic_attributes = ['dataset_name',
'years',
'date_computed',
'name',
'operation',
'storage_location']
attrs = '[\'' + '\';\''.join(indicator.attributes) + '\']'
lines.append('\t<attributes>%s</attributes>'%(
attrs))
for basic_attr in basic_attributes:
attr_value = indicator.__getattribute__(basic_attr)
lines.append('\t<%s>%s</%s>'%(basic_attr,
str(attr_value),
basic_attr))
#get additional attributes for child classes...
for attr,value in indicator.get_additional_metadata():
lines.append('\t<%s>%s</%s>'%(attr,str(value),attr))
lines += source_data.get_metadata(indentation = 1)
lines.append('</%s>'%class_name)
#write to metadata file
file = indicator.get_file_name(year = year, extension = 'meta')
path = os.path.join(indicator.get_storage_location(), file)
f = open(path, 'w')
output = '\n'.join(lines)
f.write(output)
f.close()
return lines
def import_indicators(self, indicator_directory):
return self._import_indicators_from_file(indicator_directory)
def _import_indicators_from_file(self, indicator_directory):
'''scans the indicator directory for indicator meta files and
recreates the indicators'''
import fnmatch
files = [os.path.join(indicator_directory, f)
for f in os.listdir(indicator_directory)
if fnmatch.fnmatch(f,'*.meta')]
indicators = []
for f in files:
try:
indicator = self._import_indicator_from_file(f)
indicators.append(indicator)
except Exception, e:
logger.log_warning('Could not load indicator from %s: %s'%(f,e))
return indicators
'''not in use yet'''
# def import_indicator(self):
# meta_path = os.path.join(indicator_directory, filename)
# indicator = self._import_indicator_from_file(meta_path)
# indicators.append(indicator)
def _import_indicator_from_file(self, file_path):
'''creates and returns an indicator from the file pointed to in file_path
'''
#TODO: If the additional child parameters are not strings, the current implementation will fail.
f = open(file_path)
version = f.readline()
indicator_class = f.readline().strip()
indicator_class = indicator_class[1:-1]
in_source_data = False
source_data_params = {}
non_constructor_attr = {
'date_computed': None
}
params = {}
for line in f.readlines():
line = line.strip()
if line == '<source_data>':
in_source_data = True
elif line == '</source_data>':
in_source_data = False
elif line != '</%s>'%indicator_class:
(name, value) = self._extract_name_and_value(line)
#TODO: figure out way for each object to know which values to
#reinterpret from string
if name == 'years' or name == 'scale':
if value == 'None' or value == '[]':
value = []
elif name=='scale':
value = [float(y) for y in value[1:-1].split(',')]
elif name=='years':
value = [int(y) for y in value[1:-1].split(',')]
elif name == 'attributes':
if value == 'None' or value == '[]':
value = []
else:
value = [attr.strip().replace("'",'') for attr in value[1:-1].split(';')]
if in_source_data:
if name == 'package_order':
order = [eval(p.strip()) for p in value[1:-1].split(',')]
pool = DatasetPoolConfiguration(
package_order = order,
)
source_data_params['dataset_pool_configuration'] = pool
else:
source_data_params[name] = value
else:
if name == 'dataset_name':
params['dataset_name'] = value
elif name in non_constructor_attr:
non_constructor_attr[name] = value
else:
params[name] = value
f.close()
cache_directory = os.path.split(os.path.dirname(file_path).split()[0])[0]
source_data_params['cache_directory'] = cache_directory
indicator = self._create_indicator(indicator_class,
params,
non_constructor_attr,
source_data_params)
return indicator
def _create_indicator(self, indicator_class, params, non_constructor_attributes, source_data_params):
source_data = SourceData(**source_data_params)
for k,v in params.items():
if v=='None':
params[k] = None
params['source_data'] = source_data
module = self._get_module_from_indicator_class(indicator_class)
if indicator_class != 'DatasetTable':
params['attribute'] = params['attributes'][0]
del params['attributes']
exec('from opus_core.indicator_framework.image_types.%s import %s'%(module, indicator_class))
indicator = locals()[indicator_class](**params)
for attr, value in non_constructor_attributes.items():
if value == 'None':
value = None
indicator.__setattr__(attr,value)
return indicator
def _extract_name_and_value(self, line):
'''takes a line of xml and returns attr name/value tuple'''
name_re = re.compile('<\w+>')
value_re = re.compile('>.*<')
line = line.strip()
name = name_re.match(line).group()[1:-1]
value = value_re.search(line).group()[1:-1]
return (name, value)
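    # Worked example (illustrative): the line '<years>[1980, 1981]</years>'
    # yields the tuple ('years', '[1980, 1981]').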
def _get_module_from_indicator_class(self, indicator_class):
modules = {
'DatasetTable': 'dataset_table',
'GeotiffMap': 'geotiff_map',
'Map': 'mapnik_map',
'Chart': 'matplotlib_chart',
'LorenzCurve': 'matplotlib_lorenzcurve',
'Table': 'table'
}
return modules[indicator_class]
from opus_core.tests import opus_unittest
from opus_core.indicator_framework.test_classes.abstract_indicator_test import AbstractIndicatorTest
import os
class Tests(AbstractIndicatorTest):
def setUp(self):
self.data_manager = IndicatorDataManager()
AbstractIndicatorTest.setUp(self)
def test__write_metadata(self):
try:
from opus_core.indicator_framework.image_types.table import Table
except: pass
else:
table = Table(
source_data = self.cross_scenario_source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
output_type = 'tab',
years = [1980,1981] # Indicators are not actually being computed, so the years don't matter here.
)
table.create(False)
table.date_computed = None
output = self.data_manager._export_indicator_to_file(
indicator = table,
source_data = self.cross_scenario_source_data,
year = None)
expected = [
'<version>1.0</version>',
'<Table>',
'\t<attributes>[\'opus_core.test.attribute\']</attributes>',
'\t<dataset_name>test</dataset_name>',
'\t<years>[1980, 1981]</years>',
'\t<date_computed>None</date_computed>',
'\t<name>attribute</name>',
'\t<operation>None</operation>',
'\t<storage_location>%s</storage_location>'%os.path.join(self.temp_cache_path, 'indicators'),
'\t<output_type>tab</output_type>',
'\t<source_data>',
'\t\t<cache_directory>%s</cache_directory>'%self.temp_cache_path,
'\t\t<comparison_cache_directory>%s</comparison_cache_directory>'%self.temp_cache_path2,
'\t\t<run_description>%s</run_description>'%self.cross_scenario_source_data.get_run_description(),
'\t\t<years>[1980]</years>',
'\t\t<package_order>[\'opus_core\']</package_order>',
'\t</source_data>',
'</Table>'
]
for i in range(len(output)):
if expected[i] != output[i]:
print expected[i]
print output[i]
self.assertEqual(output,expected)
def test__read_write_metadata(self):
try:
from opus_core.indicator_framework.image_types.table import Table
except:
raise
else:
table = Table(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
output_type = 'tab',
years = [1980,1981] # Indicators are not actually being computed, so the years don't matter here.
)
table.create(False)
self.data_manager._export_indicator_to_file(indicator = table,
source_data = self.source_data,
year = None)
metadata_file = table.get_file_name(extension = 'meta')
metadata_path = os.path.join(table.get_storage_location(),
metadata_file)
self.assertEqual(os.path.exists(metadata_path), True)
expected_path = 'test__tab__attribute.meta'
self.assertEqual(metadata_file,expected_path)
new_table = self.data_manager._import_indicator_from_file(metadata_path)
for attr in ['attributes','dataset_name',
'output_type','date_computed',
'years']:
old_val = table.__getattribute__(attr)
new_val = new_table.__getattribute__(attr)
self.assertEqual(old_val,new_val)
self.assertEqual(table.source_data.cache_directory,
new_table.source_data.cache_directory)
self.assertEqual(table.source_data.dataset_pool_configuration.package_order,
new_table.source_data.dataset_pool_configuration.package_order)
if __name__ == '__main__':
opus_unittest.main() | gpl-2.0 |
bhilburn/gnuradio | gr-digital/examples/snr_estimators.py | 46 | 6348 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
try:
import scipy
from scipy import stats
except ImportError:
print "Error: Program requires scipy (www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires Matplotlib (matplotlib.sourceforge.net)."
sys.exit(1)
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from optparse import OptionParser
from gnuradio.eng_option import eng_option
'''
This example program uses Python and GNU Radio to calculate the SNR of a
noisy BPSK signal with several estimators and compare the results.
For an explanation of the online algorithms, see:
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
'''
def online_skewness(data):
n = 0
mean = 0
M2 = 0
M3 = 0
for n in xrange(len(data)):
delta = data[n] - mean
delta_n = delta / (n+1)
term1 = delta * delta_n * n
mean = mean + delta_n
M3 = M3 + term1 * delta_n * (n - 1) - 3 * delta_n * M2
M2 = M2 + term1
return scipy.sqrt(len(data))*M3 / scipy.power(M2, 3.0/2.0);
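# Quick sanity check (illustrative, not executed here): the one-pass estimate
# above should agree with the batch estimator from scipy.stats, since both
# compute the biased sample skewness g1 = m3 / m2**1.5, e.g.
#   data = scipy.random.randn(1000)
#   online_skewness(data)  # ~= stats.skew(data)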
def snr_est_simple(signal):
s = scipy.mean(abs(signal)**2)
n = 2*scipy.var(abs(signal))
snr_rat = s/n
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_skew(signal):
y1 = scipy.mean(abs(signal))
y2 = scipy.mean(scipy.real(signal**2))
y3 = (y1*y1 - y2)
y4 = online_skewness(signal.real)
#y4 = stats.skew(abs(signal.real))
skw = y4*y4 / (y2*y2*y2);
s = y1*y1
n = 2*(y3 + skw*s)
snr_rat = s / n
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_m2m4(signal):
M2 = scipy.mean(abs(signal)**2)
M4 = scipy.mean(abs(signal)**4)
snr_rat = scipy.sqrt(2*M2*M2 - M4) / (M2 - scipy.sqrt(2*M2*M2 - M4))
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_svr(signal):
N = len(signal)
ssum = 0
msum = 0
for i in xrange(1, N):
ssum += (abs(signal[i])**2)*(abs(signal[i-1])**2)
msum += (abs(signal[i])**4)
savg = (1.0/(float(N)-1.0))*ssum
mavg = (1.0/(float(N)-1.0))*msum
beta = savg / (mavg - savg)
snr_rat = ((beta - 1) + scipy.sqrt(beta*(beta-1)))
return 10.0*scipy.log10(snr_rat), snr_rat
def main():
gr_estimators = {"simple": digital.SNR_EST_SIMPLE,
"skew": digital.SNR_EST_SKEW,
"m2m4": digital.SNR_EST_M2M4,
"svr": digital.SNR_EST_SVR}
py_estimators = {"simple": snr_est_simple,
"skew": snr_est_skew,
"m2m4": snr_est_m2m4,
"svr": snr_est_svr}
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Set the number of samples to process [default=%default]")
parser.add_option("", "--snr-min", type="float", default=-5,
help="Minimum SNR [default=%default]")
parser.add_option("", "--snr-max", type="float", default=20,
help="Maximum SNR [default=%default]")
parser.add_option("", "--snr-step", type="float", default=0.5,
help="SNR step amount [default=%default]")
parser.add_option("-t", "--type", type="choice",
choices=gr_estimators.keys(), default="simple",
help="Estimator type {0} [default=%default]".format(
gr_estimators.keys()))
(options, args) = parser.parse_args ()
N = options.nsamples
xx = scipy.random.randn(N)
xy = scipy.random.randn(N)
bits =2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1
#bits =(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1) + \
# 1j*(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1)
snr_known = list()
snr_python = list()
snr_gr = list()
# when to issue an SNR tag; can be ignored in this example.
ntag = 10000
n_cpx = xx + 1j*xy
py_est = py_estimators[options.type]
gr_est = gr_estimators[options.type]
SNR_min = options.snr_min
SNR_max = options.snr_max
SNR_step = options.snr_step
SNR_dB = scipy.arange(SNR_min, SNR_max+SNR_step, SNR_step)
for snr in SNR_dB:
SNR = 10.0**(snr/10.0)
scale = scipy.sqrt(2*SNR)
yy = bits + n_cpx/scale
print "SNR: ", snr
Sknown = scipy.mean(yy**2)
Nknown = scipy.var(n_cpx/scale)
snr0 = Sknown/Nknown
snr0dB = 10.0*scipy.log10(snr0)
snr_known.append(float(snr0dB))
snrdB, snr = py_est(yy)
snr_python.append(snrdB)
gr_src = blocks.vector_source_c(bits.tolist(), False)
gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
gr_chn = channels.channel_model(1.0/scale)
gr_snk = blocks.null_sink(gr.sizeof_gr_complex)
tb = gr.top_block()
tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
tb.run()
snr_gr.append(gr_snr.snr())
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
s1.grid(True)
s1.set_title('SNR Estimators')
s1.set_xlabel('SNR (dB)')
s1.set_ylabel('Estimated SNR')
s1.legend()
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.plot(yy.real, yy.imag, 'o')
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
e-koch/VLA_Lband | 14B-088/HI/analysis/co_comparison/co_radial_profile.py | 1 | 8353 |
from astropy.io import fits
import matplotlib.pyplot as p
from astropy.coordinates import Angle
from astropy import units as u
import numpy as np
from spectral_cube.lower_dimensional_structures import Projection
from astropy.table import Table
import os
from os.path import exists
from os.path import join as osjoin
from cube_analysis.profiles import surfdens_radial_profile
from paths import (iram_co21_14B088_data_path,
fourteenB_HI_data_wGBT_path,
allfigs_path, c_hi_analysispath,
fourteenB_wGBT_HI_file_dict)
from constants import (co21_mass_conversion, hi_mass_conversion,
beam_eff_30m_druard)
from galaxy_params import gal_feath as gal
from plotting_styles import onecolumn_figure, default_figure
'''
Create the surface density profile in CO(2-1), assuming a constant
conversion factor from CO(2-1) intensity to H2 mass.
'''
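# The profile calls below are assumed (from their use in this script) to return
# the radial bin centres, the azimuthally averaged surface density, and its
# uncertainty, e.g.:
#   rs, sd, sd_sigma = surfdens_radial_profile(gal, mom0=co_mom0, dr=dr,
#                                              mass_conversion=co21_mass_conversion)
# with rs in kpc and sd, sd_sigma in Msun / pc^2 (see the axis labels below).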
fig_path = osjoin(allfigs_path(""), "co_vs_hi")
if not exists(fig_path):
os.mkdir(fig_path)
fig_co_path = osjoin(allfigs_path(""), "CO21_properties")
if not exists(fig_co_path):
os.mkdir(fig_co_path)
# IRAM beam efficiency
beam_eff = beam_eff_30m_druard
# Set the radial disk widths to bin over
dr = 100 * u.pc
# Load the moment 0
co_mom0 = Projection.from_hdu(fits.open(iram_co21_14B088_data_path("m33.co21_iram.14B-088_HI.mom0.fits"))[0])
co_mom0 = co_mom0.to(u.K * u.km / u.s) / beam_eff
hi_mom0 = \
Projection.from_hdu(fits.open(fourteenB_wGBT_HI_file_dict["Moment0"])[0])
rs, sd, sd_sigma = surfdens_radial_profile(gal, mom0=co_mom0,
max_rad=7 * u.kpc, dr=dr,
mass_conversion=co21_mass_conversion)
rs_n, sd_n, sd_sigma_n = \
surfdens_radial_profile(gal, mom0=co_mom0,
pa_bounds=Angle([0.5 * np.pi * u.rad,
-0.5 * np.pi * u.rad]),
max_rad=7 * u.kpc, dr=dr,
mass_conversion=co21_mass_conversion)
rs_s, sd_s, sd_sigma_s = \
surfdens_radial_profile(gal, mom0=co_mom0,
pa_bounds=Angle([-0.5 * np.pi * u.rad,
0.5 * np.pi * u.rad]),
max_rad=7 * u.kpc, dr=dr,
mass_conversion=co21_mass_conversion)
onecolumn_figure()
p.errorbar(rs.value, np.log10(sd.value),
yerr=0.434 * sd_sigma.value / sd.value, fmt="-",
label=r"H$_2$", drawstyle='steps-mid')
p.ylabel(r" log $\Sigma$ (M$_{\odot}$ pc$^{-2}$)")
p.xlabel(r"Radius (kpc)")
p.grid("on")
p.tight_layout()
p.savefig(osjoin(fig_co_path, "M33_Sigma_profile_co21_dr_{}pc.pdf".format(int(dr.value))))
p.savefig(osjoin(fig_co_path, "M33_Sigma_profile_co21_dr_{}pc.png".format(int(dr.value))))
p.close()
# Show the north vs south profiles
p.plot(rs.value, np.log10(sd.value), "-.", drawstyle='steps-mid',
label="Total")
p.errorbar(rs_n.value, np.log10(sd_n.value),
yerr=0.434 * sd_sigma_n.value / sd_n.value, fmt="-",
label="North", drawstyle='steps-mid')
p.errorbar(rs_s.value, np.log10(sd_s.value),
yerr=0.434 * sd_sigma_s.value / sd_s.value, fmt="--",
label="South", drawstyle='steps-mid')
# p.plot(rs_n.value, sd_n.value, "bD-", label="North")
# p.plot(rs_s.value, sd_s.value, "go-", label="South")
p.ylabel(r"log $\Sigma$ (M$_{\odot}$ pc$^{-2}$)")
p.xlabel(r"Radius (kpc)")
p.legend(loc='best', frameon=True)
p.grid("on")
p.savefig(osjoin(fig_co_path, "M33_Sigma_profile_co21_N_S_dr_{}pc.pdf".format(int(dr.value))))
p.savefig(osjoin(fig_co_path, "M33_Sigma_profile_co21_N_S_dr_{}pc.png".format(int(dr.value))))
p.close()
# p.show()
# Now get the HI profile on the same scales
rs_hi, sd_hi, sd_sigma_hi = \
surfdens_radial_profile(gal,
mom0=hi_mom0,
max_rad=7 * u.kpc, dr=dr,
mass_conversion=hi_mass_conversion)
# Overplot these two.
p.errorbar(rs.value, np.log10(sd.value),
yerr=0.434 * sd_sigma.value / sd.value, fmt="-",
label=r"H$_2$", drawstyle='steps-mid')
p.errorbar(rs_hi.value, np.log10(sd_hi.value),
yerr=0.434 * sd_sigma_hi.value / sd_hi.value, fmt="--",
label=r"HI", drawstyle='steps-mid')
p.ylabel(r" log $\Sigma$ (M$_{\odot}$ pc$^{-2}$)")
p.xlabel(r"Radius (kpc)")
p.legend(loc='best', frameon=True)
p.grid("on")
p.tight_layout()
p.savefig(osjoin(fig_path, "M33_Sigma_profile_hi_co21_dr_{}pc.pdf".format(int(dr.value))))
p.savefig(osjoin(fig_path, "M33_Sigma_profile_hi_co21_dr_{}pc.png".format(int(dr.value))))
p.close()
# Save the radial profiles in a FITS table
table = Table([rs, sd, sd_sigma, sd_hi, sd_sigma_hi],
names=('Radius', "CO_Sigma", "CO_Sigma_std", "HI_Sigma",
"HI_Sigma_std"))
table.write(fourteenB_HI_data_wGBT_path("tables/co21_hi_radialprofiles_{}pc.fits".format(int(dr.value)),
no_check=True),
overwrite=True)
# Also plot the total gas surface density against the stellar surface density
# from Corbelli
corbelli = Table.read(c_hi_analysispath("rotation_curves/corbelli_rotation_curve.csv"))
p.semilogy(rs.value, (sd_hi + sd).value,
linestyle="-",
label="Gas", drawstyle='steps-mid')
p.semilogy(corbelli["R"][corbelli["R"] <= 6.5],
corbelli["SigmaStellar"][corbelli["R"] <= 6.5], "g--",
drawstyle='steps-mid',
label="Stars")
p.ylabel(r"log $\Sigma$ / (M$_{\odot}$ pc$^{-2}$)")
p.xlabel(r"Radius (kpc)")
p.legend(loc='best', frameon=True)
p.grid()
p.tight_layout()
p.savefig(osjoin(fig_path, "M33_Sigma_profile_gas_stars_corbelli_{}pc.pdf".format(int(dr.value))))
p.savefig(osjoin(fig_path, "M33_Sigma_profile_gas_stars_corbelli_{}pc.png".format(int(dr.value))))
p.close()
# Finally, let's calculate some clumping factors as in Leroy+13
# rs_m, sd_m, sd_sigma_m = surfdens_radial_profile(gal, mom0=co_mom0,
# max_rad=7 * u.kpc, dr=dr,
# weight_type='mass',
# mass_conversion=co21_mass_conversion)
# rs_hi_m, sd_hi_m, sd_sigma_hi_m = \
# surfdens_radial_profile(gal,
# mom0=hi_mom0,
# max_rad=7 * u.kpc, dr=dr,
# weight_type='mass')
# p.errorbar(np.log10(sd.value), np.log10(sd_m.value),
# xerr=0.434 * sd_sigma.value / sd.value,
# yerr=0.434 * sd_sigma_m.value / sd_m.value,
# fmt="o", label="H$_2$")
# p.errorbar(np.log10(sd_hi.value), np.log10(sd_hi_m.value),
# xerr=0.434 * sd_sigma_hi.value / sd_hi.value,
# yerr=0.434 * sd_sigma_hi_m.value / sd_hi_m.value,
# fmt="D", label="HI")
# equality = np.arange(-2.5, 2, 0.1)
# p.plot(equality, equality, 'k--')
# p.ylabel(r"log Mass-Weighted $\Sigma$ / (M$_{\odot}$ pc$^{-2}$)")
# p.xlabel(r"log Area-Weighted $\Sigma$ / (M$_{\odot}$ pc$^{-2}$)")
# p.ylim([0.25, 1.9])
# p.xlim([-2.1, 1.4])
# p.legend(loc='upper left', frameon=True)
# p.savefig(allfigs_path(osjoin(fig_path, "hi_co_area_weighted_vs_mass_weighted_dr_{}pc.pdf".format(int(dr.value)))))
# p.savefig(allfigs_path(osjoin(fig_path, "hi_co_area_weighted_vs_mass_weighted_dr_{}pc.png".format(int(dr.value)))))
# p.tight_layout()
# p.close()
# The H2 (i.e. CO) points are scattered widely, while the HI points are
# clustered together and hard to see. Make an HI-only version below.
# p.errorbar(np.log10(sd_hi.value), np.log10(sd_hi_m.value),
# xerr=0.434 * sd_sigma_hi.value / sd_hi.value,
# yerr=0.434 * sd_sigma_hi_m.value / sd_hi_m.value,
# fmt="D", label="HI")
# equality = np.arange(-2.5, 2, 0.1)
# p.plot(equality, equality, 'k--')
# p.ylabel(r"log Mass-Weighted $\Sigma$ / (M$_{\odot}$ pc$^{-2}$)")
# p.xlabel(r"log Area-Weighted $\Sigma$ / (M$_{\odot}$ pc$^{-2}$)")
# p.ylim([0.65, 1.0])
# p.xlim([0.65, 0.9])
# p.tight_layout()
# p.savefig(allfigs_path(osjoin(fig_path, "area_weighted_vs_mass_weighted_dr_{}pc.pdf".format(int(dr.value)))))
# p.savefig(allfigs_path(osjoin(fig_path, "area_weighted_vs_mass_weighted_dr_{}pc.png".format(int(dr.value)))))
# p.close()
# clump_co = sd_m / sd
# clump_hi = sd_hi_m / sd_hi
default_figure()
| mit |
Supermem/ibis | ibis/tests/test_tasks.py | 9 | 10953 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import pandas as pd
import ibis.compat as compat
from .test_comms import double_ex
from ibis.tasks import IbisTaskMessage, IbisTaskExecutor
from ibis.util import guid
from ibis.wire import BytesIO
import ibis.wire as wire
from ibis.compat import unittest
from ibis.tests.test_server import WorkerTestFixture
try:
from ibis.comms import SharedMmap, IPCLock, IbisTableWriter
SKIP_TESTS = False
except ImportError:
SKIP_TESTS = True
pytestmark = pytest.mark.skipif(SKIP_TESTS or compat.PY3,
reason='Comms extension disabled')
class TestTasks(unittest.TestCase):
def test_message_encode_decode(self):
task = IbisTaskMessage(12345, 'foo', 12, 1000)
encoded = task.encode()
decoded = IbisTaskMessage.decode(encoded)
encoded2 = decoded.encode()
self.assertEqual(encoded, encoded2)
attrs = ['semaphore_id', 'shmem_name', 'shmem_offset', 'shmem_size']
for attr in attrs:
self.assertEqual(getattr(task, attr), getattr(decoded, attr))
class TestPingPongTask(unittest.TestCase):
def setUp(self):
self.paths_to_delete = []
# make size small so tracebacks get truncated
path, size, offset = 'task_%s' % guid(), 36, 0
self.paths_to_delete.append(path)
self.lock = IPCLock(is_slave=0)
self.task = IbisTaskMessage(self.lock.semaphore_id, path,
offset, size)
self.mm = SharedMmap(path, size, create=True)
wire.PackedMessageWriter(self.mm).string('ping')
def tearDown(self):
for path in self.paths_to_delete:
try:
os.remove(path)
except os.error:
pass
def test_execute_task(self):
_execute_task(self.task, self.lock)
self.mm.seek(0)
reader = wire.PackedMessageReader(self.mm)
assert reader.uint8()
assert reader.string() == 'pong'
def _execute_task(task, master_lock):
executor = IbisTaskExecutor(task)
try:
executor.execute()
except:
# Don't deadlock if execute has an exception
executor.lock.release()
raise
# IPCLock has been released
master_lock.acquire()
class TestTaskE2E(TestPingPongTask, WorkerTestFixture):
def setUp(self):
TestPingPongTask.setUp(self)
WorkerTestFixture.setUp(self)
def tearDown(self):
TestPingPongTask.tearDown(self)
WorkerTestFixture.tearDown(self)
def test_task_end_to_end(self):
msg = self._run_task(self.task)
assert msg == 'ok'
self.mm.seek(0)
reader = wire.PackedMessageReader(self.mm)
assert reader.uint8()
assert reader.string() == 'pong'
def test_task_return_exception(self):
self.mm.seek(0)
wire.PackedMessageWriter(self.mm).string('__unknown_task__')
msg = self._run_task(self.task)
assert msg == 'ok'
self.mm.seek(0)
reader = wire.PackedMessageReader(self.mm)
assert reader.uint8() == 0
error_msg = reader.string()
assert 'Traceback' in error_msg
def _run_task(self, task):
encoded_task = task.encode()
worker_port, worker_pid = self._spawn_worker()
sock = self._connect(worker_port)
sock.send(encoded_task)
msg = sock.recv(1024)
# IPCLock has been released
self.lock.acquire()
return msg
def delete_all_guid_files():
import glob
import os
[os.remove(x) for x in glob.glob('*') if len(x) == 32]
class NRows(object):
def __init__(self):
self.total = 0
def update(self, values):
self.total += len(values)
def merge(self, other):
self.total += other.total
return self
def finalize(self):
return self.total
class Summ(object):
def __init__(self):
self.total = 0
def update(self, values):
import pandas as pd
self.total += pd.Series(values).sum()
def merge(self, other):
self.total += other.total
return self
def finalize(self):
return self.total
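# NRows and Summ (and the Mean class built in the tests below) follow the
# update / merge / finalize protocol that IbisTaskExecutor drives through the
# 'agg-update', 'agg-merge' and 'agg-finalize' task messages. Illustrative
# use outside the shared-memory machinery:
#   agg = Summ(); agg.update([1, 2, 3])
#   other = Summ(); other.update([4, 5])
#   agg.merge(other).finalize()  # == 15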
class TestAggregateTasks(unittest.TestCase):
def _get_mean_uda(self):
        # Dynamically generate the aggregate class. Use pandas so nulls are
        # excluded from the running sum and count.
class Mean(object):
def __init__(self):
self.total = 0
self.count = 0
def update(self, values):
values = pd.Series(values)
self.total += values.sum()
self.count += values.count()
def merge(self, other):
self.total += other.total
self.count += other.count
return self
def finalize(self):
return self.total / float(self.count)
return Mean
def setUp(self):
self.paths_to_delete = []
self.col_fragments = [double_ex(1000) for _ in range(10)]
self.lock = IPCLock(is_slave=0)
def test_update(self):
klass = self._get_mean_uda()
col = self.col_fragments[0]
task, mm = self._make_update_task(klass, [col])
_execute_task(task, self.lock)
mm.seek(0)
reader = wire.PackedMessageReader(mm)
# success
if not reader.uint8():
raise Exception(reader.string())
result = compat.pickle_load(reader.string())
ex_total = pd.Series(col.to_numpy_for_pandas()).sum()
assert result.total == ex_total
# Test with prior state
col = self.col_fragments[1]
task, mm = self._make_update_task(klass, [col], prior_state=result)
# Executor's turn again
self.lock.release()
_execute_task(task, self.lock)
mm.seek(0)
reader = wire.PackedMessageReader(mm)
# success
if not reader.uint8():
raise Exception(reader.string())
result = compat.pickle_load(reader.string())
ex_total += pd.Series(col.to_numpy_for_pandas()).sum()
# pandas will yield 0 on None input strangely
assert ex_total != 0
assert result.total == ex_total
def test_merge(self):
klass = self._get_mean_uda()
lcol = self.col_fragments[0]
rcol = self.col_fragments[1]
left = self._update(klass, [lcol])
right = self._update(klass, [rcol])
task, mm = self._make_merge_task(left, right)
_execute_task(task, self.lock)
mm.seek(0)
reader = wire.PackedMessageReader(mm)
# success
if not reader.uint8():
raise Exception(reader.string())
result = compat.pickle_load(reader.string())
larr = lcol.to_numpy_for_pandas()
rarr = rcol.to_numpy_for_pandas()
assert larr is not None
ex_total = (pd.Series(larr).sum() + pd.Series(rarr).sum())
assert result.total == ex_total
def test_finalize(self):
klass = self._get_mean_uda()
col = self.col_fragments[0]
result = self._update(klass, [col])
task, mm = self._make_finalize_task(result)
_execute_task(task, self.lock)
mm.seek(0)
reader = wire.PackedMessageReader(mm)
# success
if not reader.uint8():
raise Exception(reader.string())
result = compat.pickle_load(reader.string())
arr = col.to_numpy_for_pandas()
ex_result = pd.Series(arr).mean()
assert result == ex_result
def _update(self, klass, args):
task, mm = self._make_update_task(klass, args)
_execute_task(task, self.lock)
self.lock.release()
mm.seek(0)
reader = wire.PackedMessageReader(mm)
# success
if not reader.uint8():
raise Exception(reader.string())
return reader.string()
def _make_update_task(self, uda_class, cols, prior_state=None):
# Overall layout here:
# - task name
# - serialized agg class
# - prior state flag 1/0
# - (optional) serialized prior state
# - serialized table fragment
payload = BytesIO()
msg_writer = wire.PackedMessageWriter(payload)
msg_writer.string('agg-update')
msg_writer.string(compat.pickle_dump(uda_class))
if prior_state is not None:
msg_writer.uint8(1)
msg_writer.string(compat.pickle_dump(prior_state))
else:
msg_writer.uint8(0)
writer = IbisTableWriter(cols)
# Create memory map of the appropriate size
path = 'task_%s' % guid()
size = writer.total_size() + payload.tell()
offset = 0
mm = SharedMmap(path, size, create=True)
self.paths_to_delete.append(path)
mm.write(payload.getvalue())
writer.write(mm)
task = IbisTaskMessage(self.lock.semaphore_id, path, offset, size)
return task, mm
def _make_merge_task(self, left_pickled, right_pickled):
payload = BytesIO()
msg_writer = wire.PackedMessageWriter(payload)
msg_writer.string('agg-merge')
msg_writer.string(left_pickled)
msg_writer.string(right_pickled)
# Create memory map of the appropriate size
path = 'task_%s' % guid()
size = payload.tell()
offset = 0
mm = SharedMmap(path, size, create=True)
self.paths_to_delete.append(path)
mm.write(payload.getvalue())
task = IbisTaskMessage(self.lock.semaphore_id, path, offset, size)
return task, mm
def _make_finalize_task(self, pickled):
payload = BytesIO()
msg_writer = wire.PackedMessageWriter(payload)
msg_writer.string('agg-finalize')
msg_writer.string(pickled)
# Create memory map of the appropriate size
path = 'task_%s' % guid()
size = payload.tell()
offset = 0
mm = SharedMmap(path, size, create=True)
self.paths_to_delete.append(path)
mm.write(payload.getvalue())
task = IbisTaskMessage(self.lock.semaphore_id, path, offset, size)
return task, mm
def tearDown(self):
for path in self.paths_to_delete:
try:
os.remove(path)
except os.error:
pass
| apache-2.0 |
mikegraham/dask | dask/bag/tests/test_bag.py | 1 | 27975 | # coding=utf-8
from __future__ import absolute_import, division, print_function
import pytest
from toolz import (merge, join, pipe, filter, identity, merge_with, take,
partial, valmap)
import math
from dask.bag.core import (Bag, lazify, lazify_task, fuse, map, collect,
reduceby, reify, partition, inline_singleton_lists, optimize,
system_encoding, from_delayed)
from dask.compatibility import BZ2File, GzipFile, reduce
from dask.utils import filetexts, tmpfile, raises, open
from dask.async import get_sync
import dask
import dask.bag as db
import bz2
import io
import shutil
import os
import partd
from collections import Iterator
from dask.utils import tmpdir
dsk = {('x', 0): (range, 5),
('x', 1): (range, 5),
('x', 2): (range, 5)}
L = list(range(5)) * 3
b = Bag(dsk, 'x', 3)
def inc(x):
return x + 1
def iseven(x):
return x % 2 == 0
def isodd(x):
return x % 2 == 1
def add(x, y):
return x + y
def test_Bag():
assert b.name == 'x'
assert b.npartitions == 3
def test_keys():
assert sorted(b._keys()) == sorted(dsk.keys())
def test_map():
c = b.map(inc)
expected = merge(dsk, dict(((c.name, i), (reify, (map, inc, (b.name, i))))
for i in range(b.npartitions)))
assert c.dask == expected
assert c.name == b.map(inc).name
def test_map_function_with_multiple_arguments():
b = db.from_sequence([(1, 10), (2, 20), (3, 30)], npartitions=3)
assert list(b.map(lambda x, y: x + y).compute(get=dask.get)) == [11, 22, 33]
assert list(b.map(list).compute()) == [[1, 10], [2, 20], [3, 30]]
class A(object):
def __init__(self, a, b, c):
pass
class B(object):
def __init__(self, a):
pass
def test_map_with_constructors():
assert db.from_sequence([[1, 2, 3]]).map(A).compute()
assert db.from_sequence([1, 2, 3]).map(B).compute()
assert db.from_sequence([[1, 2, 3]]).map(B).compute()
failed = False
try:
db.from_sequence([[1,]]).map(A).compute()
except TypeError:
failed = True
assert failed
def test_map_with_builtins():
b = db.from_sequence(range(3))
assert ' '.join(b.map(str)) == '0 1 2'
assert b.map(str).map(tuple).compute() == [('0',), ('1',), ('2',)]
assert b.map(str).map(tuple).map(any).compute() == [True, True, True]
b2 = b.map(lambda n: [(n, n+1), (2*(n-1), -n)])
assert b2.map(dict).compute() == [{0: 1, -2: 0}, {1: 2, 0: -1}, {2: -2}]
assert b.map(lambda n: (n, n+1)).map(pow).compute() == [0, 1, 8]
assert b.map(bool).compute() == [False, True, True]
assert db.from_sequence([(1, 'real'), ('1', 'real')]).map(hasattr).compute() == \
[True, False]
def test_map_with_kwargs():
b = db.from_sequence(range(100), npartitions=10)
assert b.map(lambda x, factor=0: x * factor,
factor=2).sum().compute() == 9900.0
assert b.map(lambda x, total=0: x / total,
total=b.sum()).sum().compute() == 1.0
assert b.map(lambda x, factor=0, total=0: x * factor / total,
total=b.sum(),
factor=2).sum().compute() == 2.0
def test_filter():
c = b.filter(iseven)
expected = merge(dsk, dict(((c.name, i),
(reify, (filter, iseven, (b.name, i))))
for i in range(b.npartitions)))
assert c.dask == expected
assert c.name == b.filter(iseven).name
def test_remove():
f = lambda x: x % 2 == 0
c = b.remove(f)
assert list(c) == [1, 3] * 3
assert c.name == b.remove(f).name
def test_iter():
assert sorted(list(b)) == sorted(L)
assert sorted(list(b.map(inc))) == sorted(list(range(1, 6)) * 3)
@pytest.mark.parametrize('func', [str, repr])
def test_repr(func):
assert str(b.npartitions) in func(b)
assert b.name[:5] in func(b)
def test_pluck():
d = {('x', 0): [(1, 10), (2, 20)],
('x', 1): [(3, 30), (4, 40)]}
b = Bag(d, 'x', 2)
assert set(b.pluck(0)) == set([1, 2, 3, 4])
assert set(b.pluck(1)) == set([10, 20, 30, 40])
assert set(b.pluck([1, 0])) == set([(10, 1), (20, 2), (30, 3), (40, 4)])
assert b.pluck([1, 0]).name == b.pluck([1, 0]).name
def test_pluck_with_default():
b = db.from_sequence(['Hello', '', 'World'])
assert raises(IndexError, lambda: list(b.pluck(0)))
assert list(b.pluck(0, None)) == ['H', None, 'W']
assert b.pluck(0, None).name == b.pluck(0, None).name
assert b.pluck(0).name != b.pluck(0, None).name
def test_unzip():
b = db.from_sequence(range(100)).map(lambda x: (x, x + 1, x + 2))
one, two, three = b.unzip(3)
assert list(one) == list(range(100))
assert list(three) == [i + 2 for i in range(100)]
assert one.name == b.unzip(3)[0].name
assert one.name != two.name
def test_fold():
c = b.fold(add)
assert c.compute() == sum(L)
assert c.key == b.fold(add).key
c2 = b.fold(add, initial=10)
assert c2.key != c.key
assert c2.compute() == sum(L) + 10 * b.npartitions
assert c2.key == b.fold(add, initial=10).key
c = db.from_sequence(range(5), npartitions=3)
def binop(acc, x):
acc = acc.copy()
acc.add(x)
return acc
d = c.fold(binop, set.union, initial=set())
assert d.compute() == set(c)
assert d.key == c.fold(binop, set.union, initial=set()).key
d = db.from_sequence('hello')
assert set(d.fold(lambda a, b: ''.join([a, b]), initial='').compute()) == set('hello')
e = db.from_sequence([[1], [2], [3]], npartitions=2)
with dask.set_options(get=get_sync):
assert set(e.fold(add, initial=[]).compute()) == set([1, 2, 3])
def test_distinct():
assert sorted(b.distinct()) == [0, 1, 2, 3, 4]
assert b.distinct().name == b.distinct().name
assert 'distinct' in b.distinct().name
assert b.distinct().count().compute() == 5
def test_frequencies():
c = b.frequencies()
assert dict(c) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}
c2 = b.frequencies(split_every=2)
assert dict(c2) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}
assert c.name == b.frequencies().name
assert c.name != c2.name
assert c2.name == b.frequencies(split_every=2).name
def test_topk():
assert list(b.topk(4)) == [4, 4, 4, 3]
c = b.topk(4, key=lambda x: -x)
assert list(c) == [0, 0, 0, 1]
c2 = b.topk(4, key=lambda x: -x, split_every=2)
assert list(c2) == [0, 0, 0, 1]
assert c.name != c2.name
assert b.topk(4).name == b.topk(4).name
def test_topk_with_non_callable_key():
b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=2)
assert list(b.topk(2, key=1)) == [(1, 10), (2, 9)]
assert list(b.topk(2, key=0)) == [(3, 8), (2, 9)]
assert b.topk(2, key=1).name == b.topk(2, key=1).name
def test_topk_with_multiarg_lambda():
b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=2)
assert list(b.topk(2, key=lambda a, b: b)) == [(1, 10), (2, 9)]
def test_lambdas():
assert list(b.map(lambda x: x + 1)) == list(b.map(inc))
def test_reductions():
assert int(b.count()) == 15
assert int(b.sum()) == 30
assert int(b.max()) == 4
assert int(b.min()) == 0
assert int(b.any()) == True
assert int(b.all()) == False # some zeros exist
assert b.all().key == b.all().key
assert b.all().key != b.any().key
def test_reduction_names():
assert b.sum().name.startswith('sum')
assert b.reduction(sum, sum).name.startswith('sum')
assert any(isinstance(k, str) and k.startswith('max')
for k in b.reduction(sum, max).dask)
assert b.reduction(sum, sum, name='foo').name.startswith('foo')
def test_tree_reductions():
b = db.from_sequence(range(12))
c = b.reduction(sum, sum, split_every=2)
d = b.reduction(sum, sum, split_every=6)
e = b.reduction(sum, sum, split_every=5)
assert c.compute() == d.compute() == e.compute()
assert len(c.dask) > len(d.dask)
c = b.sum(split_every=2)
d = b.sum(split_every=5)
assert c.compute() == d.compute()
assert len(c.dask) > len(d.dask)
assert c.key != d.key
assert c.key == b.sum(split_every=2).key
assert c.key != b.sum().key
def test_mean():
assert b.mean().compute(get=dask.get) == 2.0
assert float(b.mean()) == 2.0
def test_non_splittable_reductions():
np = pytest.importorskip('numpy')
data = list(range(100))
c = db.from_sequence(data, npartitions=10)
assert c.mean().compute() == np.mean(data)
assert c.std().compute(get=dask.get) == np.std(data)
def test_std():
assert b.std().compute(get=dask.get) == math.sqrt(2.0)
assert float(b.std()) == math.sqrt(2.0)
def test_var():
assert b.var().compute(get=dask.get) == 2.0
assert float(b.var()) == 2.0
def test_join():
c = b.join([1, 2, 3], on_self=isodd, on_other=iseven)
assert list(c) == list(join(iseven, [1, 2, 3], isodd, list(b)))
assert list(b.join([1, 2, 3], isodd)) == \
list(join(isodd, [1, 2, 3], isodd, list(b)))
assert c.name == b.join([1, 2, 3], on_self=isodd, on_other=iseven).name
def test_foldby():
c = b.foldby(iseven, add, 0, add, 0)
assert (reduceby, iseven, add, (b.name, 0), 0) in list(c.dask.values())
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
assert c.name == b.foldby(iseven, add, 0, add, 0).name
c = b.foldby(iseven, lambda acc, x: acc + x)
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
def test_map_partitions():
assert list(b.map_partitions(len)) == [5, 5, 5]
assert b.map_partitions(len).name == b.map_partitions(len).name
assert b.map_partitions(lambda a: len(a) + 1).name != b.map_partitions(len).name
def test_map_partitions_with_kwargs():
b = db.from_sequence(range(100), npartitions=10)
assert b.map_partitions(
lambda X, factor=0: [x * factor for x in X],
factor=2).sum().compute() == 9900.0
assert b.map_partitions(
lambda X, total=0: [x / total for x in X],
total=b.sum()).sum().compute() == 1.0
assert b.map_partitions(
lambda X, factor=0, total=0: [x * factor / total for x in X],
total=b.sum(),
factor=2).sum().compute() == 2.0
def test_lazify_task():
task = (sum, (reify, (map, inc, [1, 2, 3])))
assert lazify_task(task) == (sum, (map, inc, [1, 2, 3]))
task = (reify, (map, inc, [1, 2, 3]))
assert lazify_task(task) == task
a = (reify, (map, inc,
(reify, (filter, iseven, 'y'))))
b = (reify, (map, inc,
(filter, iseven, 'y')))
assert lazify_task(a) == b
f = lambda x: x
def test_lazify():
a = {'x': (reify, (map, inc,
(reify, (filter, iseven, 'y')))),
'a': (f, 'x'), 'b': (f, 'x')}
b = {'x': (reify, (map, inc,
(filter, iseven, 'y'))),
'a': (f, 'x'), 'b': (f, 'x')}
assert lazify(a) == b
def test_inline_singleton_lists():
inp = {'b': (list, 'a'),
'c': (f, 'b', 1)}
out = {'c': (f, (list, 'a'), 1)}
assert inline_singleton_lists(inp) == out
out = {'c': (f, 'a' , 1)}
assert optimize(inp, ['c']) == out
inp = {'b': (list, 'a'),
'c': (f, 'b', 1),
'd': (f, 'b', 2)}
assert inline_singleton_lists(inp) == inp
inp = {'b': (4, 5)} # doesn't inline constants
assert inline_singleton_lists(inp) == inp
def test_take():
assert list(b.take(2)) == [0, 1]
assert b.take(2) == (0, 1)
assert isinstance(b.take(2, compute=False), Bag)
def test_map_is_lazy():
from dask.bag.core import map
assert isinstance(map(lambda x: x, [1, 2, 3]), Iterator)
def test_can_use_dict_to_make_concrete():
assert isinstance(dict(b.frequencies()), dict)
@pytest.mark.xfail(reason="bloscpack BLOSC_MAX_BUFFERSIZE")
def test_from_castra():
castra = pytest.importorskip('castra')
pd = pytest.importorskip('pandas')
dd = pytest.importorskip('dask.dataframe')
df = pd.DataFrame({'x': list(range(100)),
'y': [str(i) for i in range(100)]})
a = dd.from_pandas(df, 10)
with tmpfile('.castra') as fn:
c = a.to_castra(fn)
default = db.from_castra(c)
with_columns = db.from_castra(c, 'x')
with_index = db.from_castra(c, 'x', index=True)
assert (list(default) == [{'x': i, 'y': str(i)}
for i in range(100)] or
list(default) == [(i, str(i)) for i in range(100)])
assert list(with_columns) == list(range(100))
assert list(with_index) == list(zip(range(100), range(100)))
assert default.name != with_columns.name != with_index.name
assert with_index.name == db.from_castra(c, 'x', index=True).name
@pytest.mark.slow
def test_from_url():
a = db.from_url(['http://google.com', 'http://github.com'])
assert a.npartitions == 2
b = db.from_url('http://raw.githubusercontent.com/dask/dask/master/README.rst')
assert b.npartitions == 1
assert b'Dask\n' in b.take(10)
def test_read_text():
with filetexts({'a1.log': 'A\nB', 'a2.log': 'C\nD'}) as fns:
assert set(line.strip() for line in db.read_text(fns)) == \
set('ABCD')
assert set(line.strip() for line in db.read_text('a*.log')) == \
set('ABCD')
assert raises(ValueError, lambda: db.read_text('non-existent-*-path'))
def test_read_text_large():
with tmpfile() as fn:
with open(fn, 'wb') as f:
f.write(('Hello, world!' + os.linesep).encode() * 100)
b = db.read_text(fn, blocksize=100)
c = db.read_text(fn)
assert len(b.dask) > 5
assert list(map(str, b)) == list(map(str, c))
d = db.read_text([fn], blocksize=100)
assert list(b) == list(d)
def test_read_text_encoding():
with tmpfile() as fn:
with open(fn, 'wb') as f:
f.write((u'你好!' + os.linesep).encode('gb18030') * 100)
b = db.read_text(fn, blocksize=100, encoding='gb18030')
c = db.read_text(fn, encoding='gb18030')
assert len(b.dask) > 5
assert list(map(lambda x: x.encode('utf-8'), b)) == list(map(lambda x: x.encode('utf-8'), c))
d = db.read_text([fn], blocksize=100, encoding='gb18030')
assert list(b) == list(d)
def test_read_text_large_gzip():
with tmpfile('gz') as fn:
f = GzipFile(fn, 'wb')
f.write(b'Hello, world!\n' * 100)
f.close()
with pytest.raises(ValueError):
b = db.read_text(fn, blocksize=100, lineterminator='\n')
c = db.read_text(fn)
assert c.npartitions == 1
@pytest.mark.slow
def test_from_s3():
# note we don't test connection modes with aws_access_key and
# aws_secret_key because these are not on travis-ci
boto = pytest.importorskip('s3fs')
five_tips = (u'total_bill,tip,sex,smoker,day,time,size\n',
u'16.99,1.01,Female,No,Sun,Dinner,2\n',
u'10.34,1.66,Male,No,Sun,Dinner,3\n',
u'21.01,3.5,Male,No,Sun,Dinner,3\n',
u'23.68,3.31,Male,No,Sun,Dinner,2\n')
# test compressed data
e = db.read_text('s3://tip-data/t*.gz')
assert e.take(5) == five_tips
# test all keys in bucket
c = db.read_text('s3://tip-data/*')
assert c.npartitions == 4
def test_from_sequence():
b = db.from_sequence([1, 2, 3, 4, 5], npartitions=3)
assert len(b.dask) == 3
assert set(b) == set([1, 2, 3, 4, 5])
def test_from_long_sequence():
L = list(range(1001))
b = db.from_sequence(L)
assert set(b) == set(L)
def test_product():
b2 = b.product(b)
assert b2.npartitions == b.npartitions**2
assert set(b2) == set([(i, j) for i in L for j in L])
x = db.from_sequence([1, 2, 3, 4])
y = db.from_sequence([10, 20, 30])
z = x.product(y)
assert set(z) == set([(i, j) for i in [1, 2, 3, 4] for j in [10, 20, 30]])
assert z.name != b2.name
assert z.name == x.product(y).name
def test_partition_collect():
with partd.Pickle() as p:
partition(identity, range(6), 3, p)
assert set(p.get(0)) == set([0, 3])
assert set(p.get(1)) == set([1, 4])
assert set(p.get(2)) == set([2, 5])
assert sorted(collect(identity, 0, p, '')) == \
[(0, [0]), (3, [3])]
def test_groupby():
c = b.groupby(identity)
result = dict(c)
assert result == {0: [0, 0 ,0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert c.npartitions == b.npartitions
assert c.name == b.groupby(identity).name
assert c.name != b.groupby(lambda x: x + 1).name
def test_groupby_with_indexer():
b = db.from_sequence([[1, 2, 3], [1, 4, 9], [2, 3, 4]])
result = dict(b.groupby(0))
assert valmap(sorted, result) == {1: [[1, 2, 3], [1, 4, 9]],
2: [[2, 3, 4]]}
def test_groupby_with_npartitions_changed():
result = b.groupby(lambda x: x, npartitions=1)
result2 = dict(result)
assert result2 == {0: [0, 0 ,0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert result.npartitions == 1
def test_concat():
a = db.from_sequence([1, 2, 3])
b = db.from_sequence([4, 5, 6])
c = db.concat([a, b])
assert list(c) == [1, 2, 3, 4, 5, 6]
assert c.name == db.concat([a, b]).name
assert b.concat().name != a.concat().name
assert b.concat().name == b.concat().name
b = db.from_sequence([1, 2, 3]).map(lambda x: x * [1, 2, 3])
assert list(b.concat()) == [1, 2, 3] * sum([1, 2, 3])
def test_concat_after_map():
a = db.from_sequence([1, 2])
b = db.from_sequence([4, 5])
result = db.concat([a.map(inc), b])
assert list(result) == [2, 3, 4, 5]
def test_args():
c = b.map(lambda x: x + 1)
d = Bag(*c._args)
assert list(c) == list(d)
assert c.npartitions == d.npartitions
def test_to_dataframe():
try:
import dask.dataframe
import pandas as pd
except ImportError:
return
b = db.from_sequence([(1, 2), (10, 20), (100, 200)], npartitions=2)
df = b.to_dataframe()
assert list(df.columns) == list(pd.DataFrame(list(b)).columns)
df = b.to_dataframe(columns=['a', 'b'])
assert df.npartitions == b.npartitions
assert list(df.columns) == ['a', 'b']
assert df.a.compute().values.tolist() == list(b.pluck(0))
assert df.b.compute().values.tolist() == list(b.pluck(1))
b = db.from_sequence([{'a': 1, 'b': 2},
{'a': 10, 'b': 20},
{'a': 100, 'b': 200}], npartitions=2)
df2 = b.to_dataframe()
assert (df2.compute().values == df.compute().values).all()
assert df2._name == b.to_dataframe()._name
assert df2._name != df._name
def test_to_textfiles():
b = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
for ext, myopen in [('gz', GzipFile), ('bz2', BZ2File), ('', open)]:
with tmpdir() as dir:
c = b.to_textfiles(os.path.join(dir, '*.' + ext), compute=False)
assert c.npartitions == b.npartitions
c.compute(get=dask.get)
assert os.path.exists(os.path.join(dir, '1.' + ext))
f = myopen(os.path.join(dir, '1.' + ext), 'rb')
text = f.read()
if hasattr(text, 'decode'):
text = text.decode()
assert 'xyz' in text
f.close()
def test_to_textfiles_encoding():
b = db.from_sequence([u'汽车', u'苹果', u'天气'], npartitions=2)
for ext, myopen in [('gz', GzipFile), ('bz2', BZ2File), ('', open)]:
with tmpdir() as dir:
c = b.to_textfiles(os.path.join(dir, '*.' + ext), encoding='gb18030', compute=False)
assert c.npartitions == b.npartitions
c.compute(get=dask.get)
assert os.path.exists(os.path.join(dir, '1.' + ext))
f = myopen(os.path.join(dir, '1.' + ext), 'rb')
text = f.read()
if hasattr(text, 'decode'):
text = text.decode('gb18030')
assert u'天气' in text
f.close()
def test_to_textfiles_inputs():
B = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
with tmpfile() as a:
with tmpfile() as b:
B.to_textfiles([a, b])
assert os.path.exists(a)
assert os.path.exists(b)
with tmpfile() as dirname:
B.to_textfiles(dirname)
assert os.path.exists(dirname)
assert os.path.exists(os.path.join(dirname, '0.part'))
assert raises(ValueError, lambda: B.to_textfiles(5))
def test_to_textfiles_endlines():
b = db.from_sequence(['a', 'b', 'c'], npartitions=1)
with tmpfile() as fn:
b.to_textfiles([fn])
with open(fn, 'r') as f:
result = f.readlines()
assert result == ['a\n', 'b\n', 'c']
def test_string_namespace():
b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert 'split' in dir(b.str)
assert 'match' in dir(b.str)
assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith']
assert list(b.str.split(' ')) == [['Alice', 'Smith'],
['Bob', 'Jones'],
['Charlie', 'Smith']]
assert list(b.str.match('*Smith')) == ['Alice Smith', 'Charlie Smith']
assert raises(AttributeError, lambda: b.str.sfohsofhf)
assert b.str.match('*Smith').name == b.str.match('*Smith').name
assert b.str.match('*Smith').name != b.str.match('*John').name
def test_string_namespace_with_unicode():
b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith']
def test_str_empty_split():
b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert list(b.str.split()) == [['Alice', 'Smith'],
['Bob', 'Jones'],
['Charlie', 'Smith']]
def test_map_with_iterator_function():
b = db.from_sequence([[1, 2, 3], [4, 5, 6]], npartitions=2)
def f(L):
for x in L:
yield x + 1
c = b.map(f)
assert list(c) == [[2, 3, 4], [5, 6, 7]]
def test_ensure_compute_output_is_concrete():
b = db.from_sequence([1, 2, 3])
result = b.map(lambda x: x + 1).compute()
assert not isinstance(result, Iterator)
class BagOfDicts(db.Bag):
def get(self, key, default=None):
return self.map(lambda d: d.get(key, default))
def set(self, key, value):
def setter(d):
d[key] = value
return d
return self.map(setter)
def test_bag_class_extend():
dictbag = BagOfDicts(*db.from_sequence([{'a': {'b': 'c'}}])._args)
assert dictbag.get('a').get('b').compute()[0] == 'c'
assert dictbag.get('a').set('d', 'EXTENSIBILITY!!!').compute()[0] == \
{'b': 'c', 'd': 'EXTENSIBILITY!!!'}
assert isinstance(dictbag.get('a').get('b'), BagOfDicts)
def test_gh715():
bin_data = u'\u20ac'.encode('utf-8')
with tmpfile() as fn:
with open(fn, 'wb') as f:
f.write(bin_data)
a = db.read_text(fn)
assert a.compute()[0] == bin_data.decode('utf-8')
def test_bag_compute_forward_kwargs():
x = db.from_sequence([1, 2, 3]).map(lambda a: a + 1)
x.compute(bogus_keyword=10)
def test_to_delayed():
from dask.delayed import Value
b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3)
a, b, c = b.map(inc).to_delayed()
assert all(isinstance(x, Value) for x in [a, b, c])
assert b.compute() == [4, 5]
b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3)
t = b.sum().to_delayed()
assert isinstance(t, Value)
assert t.compute() == 21
def test_from_delayed():
from dask.delayed import value, do
a, b, c = value([1, 2, 3]), value([4, 5, 6]), value([7, 8, 9])
bb = from_delayed([a, b, c])
assert bb.name == from_delayed([a, b, c]).name
assert isinstance(bb, Bag)
assert list(bb) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
asum_value = do(lambda X: sum(X))(a)
asum_item = db.Item.from_delayed(asum_value)
assert asum_value.compute() == asum_item.compute() == 6
def test_range():
for npartitions in [1, 7, 10, 28]:
b = db.range(100, npartitions=npartitions)
assert len(b.dask) == npartitions
assert b.npartitions == npartitions
assert list(b) == list(range(100))
@pytest.mark.parametrize("npartitions", [1, 7, 10, 28])
def test_zip(npartitions, hi=1000):
evens = db.from_sequence(range(0, hi, 2), npartitions=npartitions)
odds = db.from_sequence(range(1, hi, 2), npartitions=npartitions)
pairs = db.zip(evens, odds)
assert pairs.npartitions == npartitions
assert list(pairs) == list(zip(range(0, hi, 2), range(1, hi, 2)))
def test_repartition():
for x, y in [(10, 5), (7, 3), (5, 1), (5, 4)]:
b = db.from_sequence(range(20), npartitions=x)
c = b.repartition(y)
assert b.npartitions == x
assert c.npartitions == y
assert list(b) == c.compute(get=dask.get)
try:
b.repartition(100)
except NotImplementedError as e:
assert '100' in str(e)
@pytest.mark.skipif('not db.core._implement_accumulate')
def test_accumulate():
parts = [[1, 2, 3], [4, 5], [], [6, 7]]
dsk = dict((('test', i), p) for (i, p) in enumerate(parts))
b = db.Bag(dsk, 'test', len(parts))
r = b.accumulate(add)
assert r.name == b.accumulate(add).name
assert r.name != b.accumulate(add, -1).name
assert r.compute() == [1, 3, 6, 10, 15, 21, 28]
assert b.accumulate(add, -1).compute() == [-1, 0, 2, 5, 9, 14, 20, 27]
assert b.accumulate(add).map(inc).compute() == [2, 4, 7, 11, 16, 22, 29]
b = db.from_sequence([1, 2, 3], npartitions=1)
assert b.accumulate(add).compute() == [1, 3, 6]
def test_groupby_tasks():
b = db.from_sequence(range(160), npartitions=4)
out = b.groupby(lambda x: x % 10, max_branch=4, method='tasks')
partitions = dask.get(out.dask, out._keys())
for a in partitions:
for b in partitions:
if a is not b:
assert not set(a) & set(b)
b = db.from_sequence(range(1000), npartitions=100)
out = b.groupby(lambda x: x % 123, method='tasks')
assert len(out.dask) < 100**2
partitions = dask.get(out.dask, out._keys())
for a in partitions:
for b in partitions:
if a is not b:
assert not set(a) & set(b)
b = db.from_sequence(range(10000), npartitions=345)
out = b.groupby(lambda x: x % 2834, max_branch=24, method='tasks')
partitions = dask.get(out.dask, out._keys())
for a in partitions:
for b in partitions:
if a is not b:
assert not set(a) & set(b)
def test_groupby_tasks_names():
b = db.from_sequence(range(160), npartitions=4)
func = lambda x: x % 10
func2 = lambda x: x % 20
assert (set(b.groupby(func, max_branch=4, method='tasks').dask) ==
set(b.groupby(func, max_branch=4, method='tasks').dask))
assert (set(b.groupby(func, max_branch=4, method='tasks').dask) !=
set(b.groupby(func, max_branch=2, method='tasks').dask))
assert (set(b.groupby(func, max_branch=4, method='tasks').dask) !=
set(b.groupby(func2, max_branch=4, method='tasks').dask))
def test_to_textfiles_empty_partitions():
with tmpdir() as d:
b = db.range(5, npartitions=5).filter(lambda x: x == 1).map(str)
b.to_textfiles(os.path.join(d, '*.txt'))
assert len(os.listdir(d)) == 5
| bsd-3-clause |
vishnumani2009/OpenSource-Open-Ended-Statistical-toolkit | FRONTEND/lassofront.py | 2 | 4919 | from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(235, 342)
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setGeometry(QtCore.QRect(10, 10, 211, 61))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setGeometry(QtCore.QRect(40, 20, 141, 20))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setGeometry(QtCore.QRect(10, 70, 211, 171))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.label = QtGui.QLabel(self.groupBox_2)
self.label.setGeometry(QtCore.QRect(50, 20, 111, 16))
self.label.setObjectName(_fromUtf8("label"))
self.doubleSpinBox = QtGui.QDoubleSpinBox(self.groupBox_2)
self.doubleSpinBox.setGeometry(QtCore.QRect(110, 20, 62, 22))
self.doubleSpinBox.setObjectName(_fromUtf8("doubleSpinBox"))
self.label_2 = QtGui.QLabel(self.groupBox_2)
self.label_2.setGeometry(QtCore.QRect(30, 60, 111, 16))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.spinBox = QtGui.QSpinBox(self.groupBox_2)
self.spinBox.setGeometry(QtCore.QRect(110, 60, 61, 22))
self.spinBox.setMaximum(10000000)
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.spinBox.valueChanged.connect(self.setalpha)
self.checkBox = QtGui.QCheckBox(self.groupBox_2)
self.checkBox.setGeometry(QtCore.QRect(30, 90, 81, 17))
self.checkBox.setObjectName(_fromUtf8("checkBox"))
self.checkBox_2 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_2.setGeometry(QtCore.QRect(120, 90, 121, 17))
self.checkBox_2.setObjectName(_fromUtf8("checkBox_2"))
self.checkBox_3 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_3.setGeometry(QtCore.QRect(30, 120, 81, 17))
self.checkBox_3.setObjectName(_fromUtf8("checkBox_3"))
self.pushButton_3 = QtGui.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(40, 280, 161, 23))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(40, 250, 161, 23))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton.clicked.connect(self.takeinput)
#self.pushButton_2.clicked.connect(self.takeoutput)
self.pushButton_3.clicked.connect(self.startlasso)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def setalpha(self):
self.alpha=self.spinBox.value()
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "Regressor Name", None))
self.lineEdit.setText(_translate("Form", "LASSO(L1)", None))
self.groupBox_2.setTitle(_translate("Form", "Options", None))
self.label.setText(_translate("Form", "Alpha", None))
self.label_2.setText(_translate("Form", "Max iterations", None))
self.checkBox.setText(_translate("Form", " Normalise", None))
self.checkBox_2.setText(_translate("Form", "Positive", None))
self.checkBox_3.setText(_translate("Form", "Fit intercept", None))
self.pushButton_3.setText(_translate("Form", "Start", None))
self.pushButton.setText(_translate("Form", "Input File", None))
def takeinput(self):
self.fname1 = QtGui.QFileDialog.getOpenFileName(None, 'Open file', 'C:')
self.xdata=[]
self.ydata=[]
for line in open(str(self.fname1)):
row=line.split("\n")[0].split(",")
self.ydata.append(row.pop())
self.xdata.append(row)
print self.xdata
print self.ydata
def startlasso(self):
import numpy as np
X=np.array(self.xdata)
Y=np.array(self.ydata)
X=[[float(y) for y in x] for x in X]
Y=[float(y) for y in Y]
from sklearn import linear_model
        alpha = self.doubleSpinBox.value() or 0.2  # "Alpha" spin box; falls back to 0.2
        clf = linear_model.Lasso(alpha=alpha)
clf.fit (X,Y)
print clf.coef_
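# Note: scikit-learn's Lasso minimises
#   (1 / (2 * n_samples)) * ||y - Xw||_2^2 + alpha * ||w||_1
# so a larger alpha pushes more coefficients to exactly zero; the "Alpha"
# spin box in the Options group is intended to control that value.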
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Form()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| gpl-3.0 |
jakobworldpeace/scikit-learn | benchmarks/bench_covertype.py | 57 | 7378 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--classifiers', nargs="+",
                        choices=ESTIMATORS, type=str,
                        default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
                        help="list of classifiers to benchmark.")
    parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
                        help="Number of concurrently running workers for "
                             "models that support parallelism.")
    parser.add_argument('--order', nargs="?", default="C", type=str,
                        choices=["F", "C"],
                        help="Allow to choose between fortran and C ordered "
                             "data")
    parser.add_argument('--random-seed', nargs="?", default=13, type=int,
                        help="Common seed used by random number generator.")
    args = vars(parser.parse_args())

    print(__doc__)

    X_train, X_test, y_train, y_test = load_data(
        order=args["order"], random_state=args["random_seed"])

    print("")
    print("Dataset statistics:")
    print("===================")
    print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
    print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
    print("%s %s" % ("data type:".ljust(25), X_train.dtype))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of train samples:".ljust(25),
             X_train.shape[0], np.sum(y_train == 1),
             np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of test samples:".ljust(25),
             X_test.shape[0], np.sum(y_test == 1),
             np.sum(y_test == 0), int(X_test.nbytes / 1e6)))

    print()
    print("Training Classifiers")
    print("====================")
    error, train_time, test_time = {}, {}, {}
    for name in sorted(args["classifiers"]):
        print("Training %s ... " % name, end="")
        estimator = ESTIMATORS[name]
        estimator_params = estimator.get_params()

        estimator.set_params(**{p: args["random_seed"]
                                for p in estimator_params
                                if p.endswith("random_state")})

        if "n_jobs" in estimator_params:
            estimator.set_params(n_jobs=args["n_jobs"])

        time_start = time()
        estimator.fit(X_train, y_train)
        train_time[name] = time() - time_start

        time_start = time()
        y_pred = estimator.predict(X_test)
        test_time[name] = time() - time_start

        error[name] = zero_one_loss(y_test, y_pred)

        print("done")

    print()
    print("Classification performance:")
    print("===========================")
    print("%s %s %s %s"
          % ("Classifier ", "train-time", "test-time", "error-rate"))
    print("-" * 44)
    for name in sorted(args["classifiers"], key=error.get):
        print("%s %s %s %s" % (name.ljust(12),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % error[name]).center(10)))

    print()
| bsd-3-clause |
easytaxibr/airflow | airflow/hooks/dbapi_hook.py | 15 | 9272 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from builtins import str
from past.builtins import basestring
from datetime import datetime
import numpy
import logging
import sys

from sqlalchemy import create_engine

from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException


class DbApiHook(BaseHook):
    """
    Abstract base class for sql hooks.
    """
    # Override to provide the connection name.
    conn_name_attr = None
    # Override to have a default connection id for a particular dbHook
    default_conn_name = 'default_conn_id'
    # Override if this db supports autocommit.
    supports_autocommit = False
    # Override with the object that exposes the connect method
    connector = None

    def __init__(self, *args, **kwargs):
        if not self.conn_name_attr:
            raise AirflowException("conn_name_attr is not defined")
        elif len(args) == 1:
            setattr(self, self.conn_name_attr, args[0])
        elif self.conn_name_attr not in kwargs:
            setattr(self, self.conn_name_attr, self.default_conn_name)
        else:
            setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])

    def get_conn(self):
        """Returns a connection object
        """
        db = self.get_connection(getattr(self, self.conn_name_attr))
        return self.connector.connect(
            host=db.host,
            port=db.port,
            username=db.login,
            schema=db.schema)
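
    # A minimal sketch of how a concrete hook is expected to fill in the
    # class-level attributes above (hypothetical subclass and driver module,
    # not part of this file):
    #
    #   import somedb_driver
    #
    #   class SomeDbHook(DbApiHook):
    #       conn_name_attr = 'somedb_conn_id'
    #       default_conn_name = 'somedb_default'
    #       supports_autocommit = True
    #       connector = somedb_driver  # must expose a DB-API connect()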

    def get_uri(self):
        conn = self.get_connection(getattr(self, self.conn_name_attr))
        login = ''
        if conn.login:
            login = '{conn.login}:{conn.password}@'.format(conn=conn)
        host = conn.host
        if conn.port is not None:
            host += ':{port}'.format(port=conn.port)
        return '{conn.conn_type}://{login}{host}/{conn.schema}'.format(
            conn=conn, login=login, host=host)
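
    # For illustration, a connection with conn_type 'postgres', login 'scott',
    # password 'tiger', host 'db.example.com', port 5432 and schema 'analytics'
    # (made-up values) would yield:
    #
    #   postgres://scott:tiger@db.example.com:5432/analytics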

    def get_sqlalchemy_engine(self, engine_kwargs=None):
        if engine_kwargs is None:
            engine_kwargs = {}
        return create_engine(self.get_uri(), **engine_kwargs)
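
    # Example (illustrative): extra keyword arguments are passed straight
    # through to sqlalchemy.create_engine, e.g.
    #
    #   engine = hook.get_sqlalchemy_engine(engine_kwargs={'echo': True})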

    def get_pandas_df(self, sql, parameters=None):
        """
        Executes the sql and returns a pandas dataframe

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        if sys.version_info[0] < 3:
            sql = sql.encode('utf-8')
        import pandas.io.sql as psql
        conn = self.get_conn()
        df = psql.read_sql(sql, con=conn, params=parameters)
        conn.close()
        return df
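
    # Example (illustrative, assuming a concrete subclass such as the sketch
    # above and a hypothetical table name):
    #
    #   df = hook.get_pandas_df("SELECT * FROM some_table LIMIT 10")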

    def get_records(self, sql, parameters=None):
        """
        Executes the sql and returns a set of records.

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        if sys.version_info[0] < 3:
            sql = sql.encode('utf-8')
        conn = self.get_conn()
        # Use a cursor from this connection rather than self.get_cursor(),
        # which would open (and leak) a second connection.
        cur = conn.cursor()
        if parameters is not None:
            cur.execute(sql, parameters)
        else:
            cur.execute(sql)
        rows = cur.fetchall()
        cur.close()
        conn.close()
        return rows
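
    # Example of a parameterized query (illustrative; the placeholder style,
    # here '%s', depends on the underlying DB-API driver):
    #
    #   rows = hook.get_records("SELECT id FROM users WHERE active = %s",
    #                           parameters=(True,))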

    def get_first(self, sql, parameters=None):
        """
        Executes the sql and returns the first resulting row.

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        if sys.version_info[0] < 3:
            sql = sql.encode('utf-8')
        conn = self.get_conn()
        cur = conn.cursor()
        if parameters is not None:
            cur.execute(sql, parameters)
        else:
            cur.execute(sql)
        rows = cur.fetchone()
        cur.close()
        conn.close()
        return rows

    def run(self, sql, autocommit=False, parameters=None):
        """
        Runs a command or a list of commands. Pass a list of sql
        statements to the sql parameter to get them to execute
        sequentially

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param autocommit: What to set the connection's autocommit setting to
            before executing the query.
        :type autocommit: bool
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        conn = self.get_conn()
        if isinstance(sql, basestring):
            sql = [sql]

        if self.supports_autocommit:
            self.set_autocommit(conn, autocommit)

        cur = conn.cursor()
        for s in sql:
            if sys.version_info[0] < 3:
                s = s.encode('utf-8')
            logging.info(s)
            if parameters is not None:
                cur.execute(s, parameters)
            else:
                cur.execute(s)
        cur.close()
        conn.commit()
        conn.close()
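
    # Example (illustrative): several statements can be executed sequentially
    # on the same connection, e.g.
    #
    #   hook.run(["CREATE TABLE IF NOT EXISTS t (x INT)",
    #             "INSERT INTO t VALUES (1)"])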

    def set_autocommit(self, conn, autocommit):
        conn.autocommit = autocommit

    def get_cursor(self):
        """
        Returns a cursor
        """
        return self.get_conn().cursor()

    def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
        """
        A generic way to insert a set of tuples into a table,
        the whole set of inserts is treated as one transaction

        :param table: Name of the target table
        :type table: str
        :param rows: The rows to insert into the table
        :type rows: iterable of tuples
        :param target_fields: The names of the columns to fill in the table
        :type target_fields: iterable of strings
        :param commit_every: The maximum number of rows to insert in one
            transaction. Set to 0 to insert all rows in one transaction.
        :type commit_every: int
        """
        if target_fields:
            target_fields = ", ".join(target_fields)
            target_fields = "({})".format(target_fields)
        else:
            target_fields = ''
        conn = self.get_conn()
        if self.supports_autocommit:
            self.set_autocommit(conn, False)
        conn.commit()
        cur = conn.cursor()
        i = 0
        for row in rows:
            i += 1
            l = []
            for cell in row:
                l.append(self._serialize_cell(cell, conn))
            values = tuple(l)
            sql = "INSERT INTO {0} {1} VALUES ({2});".format(
                table,
                target_fields,
                ",".join(values))
            cur.execute(sql)
            if commit_every and i % commit_every == 0:
                conn.commit()
                logging.info(
                    "Loaded {i} rows into {table} so far".format(**locals()))
        conn.commit()
        cur.close()
        conn.close()
        logging.info(
            "Done loading. Loaded a total of {i} rows".format(**locals()))

    @staticmethod
    def _serialize_cell(cell, conn=None):
        """
        Returns the SQL literal of the cell as a string.

        :param cell: The cell to insert into the table
        :type cell: object
        :param conn: The database connection
        :type conn: connection object
        :return: The serialized cell
        :rtype: str
        """
        if isinstance(cell, basestring):
            return "'" + str(cell).replace("'", "''") + "'"
        elif cell is None:
            return 'NULL'
        elif isinstance(cell, numpy.datetime64):
            return "'" + str(cell) + "'"
        elif isinstance(cell, datetime):
            return "'" + cell.isoformat() + "'"
        else:
            return str(cell)
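
    # Illustrative usage of insert_rows above (table and field names made up):
    #
    #   hook.insert_rows("users",
    #                    rows=[(1, "alice"), (2, "bob")],
    #                    target_fields=["id", "name"])
    #
    # and a few inputs with the literals produced by _serialize_cell:
    #
    #   _serialize_cell("O'Brien")             -> 'O''Brien'
    #   _serialize_cell(None)                  -> NULL
    #   _serialize_cell(datetime(2017, 1, 1))  -> '2017-01-01T00:00:00'
    #   _serialize_cell(42)                    -> 42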

    def bulk_dump(self, table, tmp_file):
        """
        Dumps a database table into a tab-delimited file

        :param table: The name of the source table
        :type table: str
        :param tmp_file: The path of the target file
        :type tmp_file: str
        """
        raise NotImplementedError()

    def bulk_load(self, table, tmp_file):
        """
        Loads a tab-delimited file into a database table

        :param table: The name of the target table
        :type table: str
        :param tmp_file: The path of the file to load into the table
        :type tmp_file: str
        """
        raise NotImplementedError()
| apache-2.0 |