repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
WeichenXu123/spark | python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py | 2 | 19354 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.rdd import PythonEvalType
from pyspark.sql import Row
from pyspark.sql.functions import array, explode, col, lit, mean, sum, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.util.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return v + 1
return plus_one
@property
def pandas_scalar_plus_two(self):
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
def test_manual(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
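        # Build an array-returning variant of the mean UDF by reusing its underlying
        # function with an ArrayType return type and the same GROUPED_AGG eval type.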
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_basic(self):
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
assert_frame_equal(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
assert_frame_equal(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
assert_frame_equal(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf(MapType(DoubleType(), DoubleType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use scalar
        # pandas UDF in groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
def test_complex_groupby(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v))
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v))
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v))
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v))
# groupby one expression and one python UDF
result6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum_udf(df.v))
expected6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum(df.v))
# groupby one expression and one scalar pandas UDF
result7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
assert_frame_equal(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort('id')
.toPandas())
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort('id')
.toPandas())
        # Test complex expressions with sql expression, scalar pandas UDF and
# group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort('id')
.toPandas())
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort('id')
.toPandas())
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
assert_frame_equal(expected2, result2)
assert_frame_equal(expected3, result3)
def test_retain_group_columns(self):
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
def test_register_vectorized_udf_basic(self):
sum_pandas_udf = pandas_udf(
lambda v: v.sum(), "integer", PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
self.assertEqual(sum_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
group_agg_pandas_udf = self.spark.udf.register("sum_pandas_udf", sum_pandas_udf)
self.assertEqual(group_agg_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
q = "SELECT sum_pandas_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
actual = sorted(map(lambda r: r[0], self.spark.sql(q).collect()))
expected = [1, 5]
self.assertEqual(actual, expected)
def test_grouped_with_empty_partition(self):
data = [Row(id=1, x=2), Row(id=1, x=3), Row(id=2, x=4)]
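        # Row comparison is positional (Row subclasses tuple), so the field name
        # 'x' instead of 'sum' in the second expected Row does not affect equality.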
expected = [Row(id=1, sum=5), Row(id=2, x=4)]
num_parts = len(data) + 1
df = self.spark.createDataFrame(self.sc.parallelize(data, numSlices=num_parts))
f = pandas_udf(lambda x: x.sum(),
'int', PandasUDFType.GROUPED_AGG)
result = df.groupBy('id').agg(f(df['x']).alias('sum')).collect()
self.assertEqual(result, expected)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_grouped_agg import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
d-mittal/pystruct | examples/plot_snakes.py | 4 | 6386 | """
==============================================
Conditional Interactions on the Snakes Dataset
==============================================
This example uses the snake dataset introduced in
Nowozin, Rother, Bagon, Sharp, Yao, Kohli: Decision Tree Fields ICCV 2011
This dataset is specifically designed to require the pairwise interaction terms
to be conditioned on the input, in other words to use non-trivial edge-features.
The task is as follows: a "snake" of length ten wandered over a grid. For
each cell, it had the option to go up, down, left or right (unless it came from
there). The input consists of these decisions, while the desired output is an
annotation of the snake from 0 (head) to 9 (tail). See the plots for an
example.
As input features we use a 3x3 window around each pixel (and pad with background
where necessary). We code the five different input colors (for up, down, left, right,
background) using a one-hot encoding. This is a rather naive approach, not using any
information about the dataset (other than that it is a 2d grid).
The task cannot be solved using the simple DirectionalGridCRF - which can only
infer head and tail (which are also possible to infer just from the unary
features). If we add edge-features that contain the features of the nodes that are
connected by the edge, the CRF can solve the task.
From an inference point of view, this task is very hard. QPBO move-making is
not able to solve it alone, so we use the relaxed AD3 inference for learning.
PS: This example runs for a while (5 minutes on 12 cores, 20 minutes on one core for me).
But it does work as well as Decision Tree Fields ;)
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
from sklearn.metrics import confusion_matrix, accuracy_score
from pystruct.learners import OneSlackSSVM
from pystruct.datasets import load_snakes
from pystruct.utils import make_grid_edges, edge_list_to_features
from pystruct.models import EdgeFeatureGraphCRF
def one_hot_colors(x):
x = x / 255
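    # Each channel is now 0 or 1 for the pure colors used here; the dot product
    # with (1, 2, 4) packs each RGB triple into a single integer, and the five
    # colors in the dataset map to the classes 1, 2, 3, 4 and 6 one-hot encoded below.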
flat = np.dot(x.reshape(-1, 3), 2 ** np.arange(3))
one_hot = label_binarize(flat, classes=[1, 2, 3, 4, 6])
return one_hot.reshape(x.shape[0], x.shape[1], 5)
def neighborhood_feature(x):
"""Add a 3x3 neighborhood around each pixel as a feature."""
# we could also use a four neighborhood, that would work even better
# but one might argue then we are using domain knowledge ;)
features = np.zeros((x.shape[0], x.shape[1], 5, 9))
# position 3 is background.
features[:, :, 3, :] = 1
features[1:, 1:, :, 0] = x[:-1, :-1, :]
features[:, 1:, :, 1] = x[:, :-1, :]
features[:-1, 1:, :, 2] = x[1:, :-1, :]
features[1:, :, :, 3] = x[:-1, :, :]
features[:-1, :-1, :, 4] = x[1:, 1:, :]
features[:-1, :, :, 5] = x[1:, :, :]
features[1:, :-1, :, 6] = x[:-1, 1:, :]
features[:, :-1, :, 7] = x[:, 1:, :]
features[:, :, :, 8] = x[:, :, :]
return features.reshape(x.shape[0] * x.shape[1], -1)
def prepare_data(X):
X_directions = []
X_edge_features = []
for x in X:
# get edges in grid
right, down = make_grid_edges(x, return_lists=True)
edges = np.vstack([right, down])
# use 3x3 patch around each point
features = neighborhood_feature(x)
# simple edge feature that encodes just if an edge is horizontal or
# vertical
edge_features_directions = edge_list_to_features([right, down])
# edge feature that contains features from the nodes that the edge connects
edge_features = np.zeros((edges.shape[0], features.shape[1], 4))
edge_features[:len(right), :, 0] = features[right[:, 0]]
edge_features[:len(right), :, 1] = features[right[:, 1]]
edge_features[len(right):, :, 0] = features[down[:, 0]]
edge_features[len(right):, :, 1] = features[down[:, 1]]
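        # Only channels 0 and 1 (the feature vectors of the two nodes an edge
        # connects) are filled; channels 2 and 3 stay zero.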
edge_features = edge_features.reshape(edges.shape[0], -1)
X_directions.append((features, edges, edge_features_directions))
X_edge_features.append((features, edges, edge_features))
return X_directions, X_edge_features
print("Please be patient. Learning will take 5-20 minutes.")
snakes = load_snakes()
X_train, Y_train = snakes['X_train'], snakes['Y_train']
X_train = [one_hot_colors(x) for x in X_train]
Y_train_flat = [y_.ravel() for y_ in Y_train]
X_train_directions, X_train_edge_features = prepare_data(X_train)
inference = 'qpbo'
# first, train on X with directions only:
crf = EdgeFeatureGraphCRF(inference_method=inference)
ssvm = OneSlackSSVM(crf, inference_cache=50, C=.1, tol=.1, max_iter=100,
n_jobs=1)
ssvm.fit(X_train_directions, Y_train_flat)
# Evaluate using confusion matrix.
# Clearly the middle of the snake is the hardest part.
X_test, Y_test = snakes['X_test'], snakes['Y_test']
X_test = [one_hot_colors(x) for x in X_test]
Y_test_flat = [y_.ravel() for y_ in Y_test]
X_test_directions, X_test_edge_features = prepare_data(X_test)
Y_pred = ssvm.predict(X_test_directions)
print("Results using only directional features for edges")
print("Test accuracy: %.3f"
% accuracy_score(np.hstack(Y_test_flat), np.hstack(Y_pred)))
print(confusion_matrix(np.hstack(Y_test_flat), np.hstack(Y_pred)))
# now, use more informative edge features:
crf = EdgeFeatureGraphCRF(inference_method=inference)
ssvm = OneSlackSSVM(crf, inference_cache=50, C=.1, tol=.1, switch_to='ad3',
n_jobs=-1)
ssvm.fit(X_train_edge_features, Y_train_flat)
Y_pred2 = ssvm.predict(X_test_edge_features)
print("Results using also input features for edges")
print("Test accuracy: %.3f"
% accuracy_score(np.hstack(Y_test_flat), np.hstack(Y_pred2)))
print(confusion_matrix(np.hstack(Y_test_flat), np.hstack(Y_pred2)))
# plot stuff
fig, axes = plt.subplots(2, 2)
axes[0, 0].imshow(snakes['X_test'][0], interpolation='nearest')
axes[0, 0].set_title('Input')
y = Y_test[0].astype(np.int)
bg = 2 * (y != 0) # enhance contrast
axes[0, 1].matshow(y + bg, cmap=plt.cm.Greys)
axes[0, 1].set_title("Ground Truth")
axes[1, 0].matshow(Y_pred[0].reshape(y.shape) + bg, cmap=plt.cm.Greys)
axes[1, 0].set_title("Prediction w/o edge features")
axes[1, 1].matshow(Y_pred2[0].reshape(y.shape) + bg, cmap=plt.cm.Greys)
axes[1, 1].set_title("Prediction with edge features")
for a in axes.ravel():
a.set_xticks(())
a.set_yticks(())
plt.show()
| bsd-2-clause |
MartinDelzant/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function and the predictions
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
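# With loss='quantile', alpha=0.95 fits the upper bound and alpha=0.05 (set below
# via set_params) fits the lower bound, so together they span a 90% prediction
# interval; the final loss='ls' fit gives the usual least-squares prediction.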
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the predictions and the 90% prediction interval
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
augmify/omim | tools/python/city_radius.py | 53 | 4375 | import sys, os, math
import matplotlib.pyplot as plt
from optparse import OptionParser
cities = []
def strip(s):
return s.strip('\t\n ')
def load_data(path):
global cities
f = open(path, 'r')
lines = f.readlines()
f.close();
for l in lines:
if l.startswith('#'):
continue
data = l.split('|')
if len(data) < 6:
continue
item = {}
item['name'] = strip(data[0])
item['population'] = int(strip(data[1]))
item['region'] = strip(data[2])
item['width'] = float(strip(data[3]))
item['height'] = float(strip(data[4]))
item['square'] = float(data[5])
cities.append(item)
# build plot
print "Cities count: %d" % len(cities)
def formula(popul, base = 32, mult = 0.5):
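    # Approximate a city's linear size as popul^(1/base) * mult; base and mult
    # are the parameters tuned by findBest below.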
#return math.exp(math.log(popul, base)) * mult
return math.pow(popul, 1 / base) * mult
def avgDistance(approx, data):
dist = 0
for x in xrange(len(data)):
dist += math.fabs(approx[x] - data[x])
return dist / float(len(data))
def findBest(popul, data, minBase = 5, maxBase = 100, stepBase = 0.1, minMult = 0.01, maxMult = 1, stepMult = 0.01):
# try to find best parameters
base = minBase
minDist = -1
bestMult = minMult
bestBase = base
while base <= maxBase:
print "%.02f%% best mult: %f, best base: %f, best dist: %f" % (100 * (base - minBase) / (maxBase - minBase), bestMult, bestBase, minDist)
mult = minMult
while mult <= maxMult:
approx = []
for p in popul:
approx.append(formula(p, base, mult))
dist = avgDistance(approx, data)
if minDist < 0 or minDist > dist:
minDist = dist
bestBase = base
bestMult = mult
mult += stepMult
base += stepBase
return (bestBase, bestMult)
def process_data(steps_count, base, mult, bestFind = False, dataFlag = 0):
avgData = []
maxData = []
sqrData = []
population = []
maxPopulation = 0
minPopulation = -1
for city in cities:
p = city['population']
w = city['width']
h = city['height']
s = city['square']
population.append(p)
if p > maxPopulation:
maxPopulation = p
if minPopulation < 0 or p < minPopulation:
minPopulation = p
maxData.append(max([w, h]))
avgData.append((w + h) * 0.5)
sqrData.append(math.sqrt(s))
bestBase = base
bestMult = mult
if bestFind:
d = maxData
if dataFlag == 1:
d = avgData
elif dataFlag == 2:
d = sqrData
bestBase, bestMult = findBest(population, d)
print "Finished\n\nBest mult: %f, Best base: %f" % (bestMult, bestBase)
approx = []
population2 = []
v = minPopulation
step = (maxPopulation - minPopulation) / float(steps_count)
for i in xrange(0, steps_count):
approx.append(formula(v, bestBase, bestMult))
population2.append(v)
v += step
plt.plot(population, avgData, 'bo', population, maxData, 'ro', population, sqrData, 'go', population2, approx, 'y')
plt.axis([minPopulation, maxPopulation, 0, 100])
plt.xscale('log')
plt.show()
if __name__ == "__main__":
if len(sys.argv) < 3:
print 'city_radius.py <data_file> <steps>'
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename", default="city_popul_sqr.data",
help="source data file", metavar="path")
parser.add_option("-s", "--scan",
dest="best", default=False, action="store_true",
help="scan best values of mult and base")
parser.add_option('-m', "--mult",
dest='mult', default=1,
help='multiplier value')
parser.add_option('-b', '--base',
dest='base', default=3.6,
help="base value")
parser.add_option('-d', '--data',
default=0, dest='data',
help="Dataset to use on best values scan: 0 - max, 1 - avg, 2 - sqr")
(options, args) = parser.parse_args()
load_data(options.filename)
process_data(1000, float(options.base), float(options.mult), options.best, int(options.data))
| apache-2.0 |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/matplotlib/tight_bbox.py | 8 | 2627 | """
This module supports the *bbox_inches* option of the savefig command.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import warnings
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
def adjust_bbox(fig, bbox_inches, fixed_dpi=None):
"""
Temporarily adjust the figure so that only the specified area
(bbox_inches) is saved.
It modifies fig.bbox, fig.bbox_inches,
fig.transFigure._boxout, and fig.patch. While the figure size
changes, the scale of the original figure is conserved. A
    function which restores the original values is returned.
"""
origBbox = fig.bbox
origBboxInches = fig.bbox_inches
_boxout = fig.transFigure._boxout
asp_list = []
locator_list = []
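    # Freeze each axes at its current position and aspect so that changing the
    # figure bbox below does not trigger a re-layout of the axes.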
for ax in fig.axes:
pos = ax.get_position(original=False).frozen()
locator_list.append(ax.get_axes_locator())
asp_list.append(ax.get_aspect())
def _l(a, r, pos=pos):
return pos
ax.set_axes_locator(_l)
ax.set_aspect("auto")
def restore_bbox():
for ax, asp, loc in zip(fig.axes, asp_list, locator_list):
ax.set_aspect(asp)
ax.set_axes_locator(loc)
fig.bbox = origBbox
fig.bbox_inches = origBboxInches
fig.transFigure._boxout = _boxout
fig.transFigure.invalidate()
fig.patch.set_bounds(0, 0, 1, 1)
if fixed_dpi is not None:
tr = Affine2D().scale(fixed_dpi)
dpi_scale = fixed_dpi / fig.dpi
else:
tr = Affine2D().scale(fig.dpi)
dpi_scale = 1.
_bbox = TransformedBbox(bbox_inches, tr)
fig.bbox_inches = Bbox.from_bounds(0, 0,
bbox_inches.width, bbox_inches.height)
x0, y0 = _bbox.x0, _bbox.y0
w1, h1 = fig.bbox.width * dpi_scale, fig.bbox.height * dpi_scale
fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0, w1, h1)
fig.transFigure.invalidate()
fig.bbox = TransformedBbox(fig.bbox_inches, tr)
fig.patch.set_bounds(x0 / w1, y0 / h1,
fig.bbox.width / w1, fig.bbox.height / h1)
return restore_bbox
def process_figure_for_rasterizing(fig, bbox_inches_restore, fixed_dpi=None):
"""
    This needs to be called when the figure dpi changes during drawing
    (e.g., rasterizing). It recovers the bbox and re-adjusts it with
the new dpi.
"""
bbox_inches, restore_bbox = bbox_inches_restore
restore_bbox()
r = adjust_bbox(fig, bbox_inches, fixed_dpi)
return bbox_inches, r
| mit |
glennq/scikit-learn | sklearn/metrics/cluster/__init__.py | 91 | 1468 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import fowlkes_mallows_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .unsupervised import calinski_harabaz_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"fowlkes_mallows_score", "entropy", "silhouette_samples",
"silhouette_score", "calinski_harabaz_score", "consensus_score"]
| bsd-3-clause |
cmshobe/landlab | tests/plot/network_sediment_transporter/test_plot_network_and_parcels.py | 3 | 5502 | import matplotlib.colors as colors
import numpy as np
import pytest
from matplotlib.colors import Normalize
from landlab.plot import plot_network_and_parcels
network_norm = Normalize(-1, 6)
parcel_color_norm = Normalize(0, 1)
parcel_color_norm2 = colors.LogNorm(vmin=0.01, vmax=1)
parcel_size_norm = Normalize(-1, 1)
parcel_size_norm2 = colors.LogNorm(vmin=0.01, vmax=1)
link_color_options = [
{}, # empty dictionary = defaults
{
"network_color": "r", # specify some simple modifications.
"network_linewidth": 0.7,
"parcel_alpha": 0,
},
{
"link_attribute": "sediment_total_volume", # use a link attribute
"parcel_alpha": 0,
},
{
"link_attribute": "sediment_total_volume",
"network_cmap": "jet", # change colormap
"network_norm": network_norm, # and normalize
"link_attribute_title": "Total Sediment Volume",
"parcel_alpha": 0,
"network_linewidth": 3,
},
]
parcel_color_options = [
{}, # empty dictionary = defaults
{"parcel_color": "r", "parcel_size": 10}, # specify some simple modifications.
{
"parcel_color_attribute": "D", # use a parcel attribute.
"parcel_color_norm": parcel_color_norm,
"parcel_color_attribute_title": "Diameter [m]",
"parcel_alpha": 1.0,
},
{"parcel_color_attribute": "abrasion_rate", "parcel_color_cmap": "bone"},
]
parcel_size_options = [
{}, # empty dictionary = defaults
{"parcel_color": "b", "parcel_size": 5}, # specify some simple modifications.
{
"parcel_size_attribute": "D", # use a parcel attribute.
"parcel_size_norm": parcel_size_norm2,
"parcel_size_attribute_title": "Diameter [m]",
"parcel_alpha": 1.0,
},
{
"parcel_size_attribute": "abrasion_rate", # an
"parcel_size_min": 10,
"parcel_size_max": 100,
},
]
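# The three option lists above are zipped element-wise (not crossed), so each
# parametrized case pairs the i-th link, parcel-color and parcel-size options.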
@pytest.mark.parametrize("arg", ["synthetic", "methow"])
@pytest.mark.parametrize(
("l_opts", "pc_opts", "ps_opts"),
zip(link_color_options, parcel_color_options, parcel_size_options),
)
def test_link_options(arg, l_opts, pc_opts, ps_opts, request):
nst = request.getfixturevalue(arg)
grid = nst.grid
parcels = nst._parcels
opts = {**l_opts, **pc_opts, **ps_opts}
plot_network_and_parcels(grid, parcels, parcel_time_index=0, **opts)
@pytest.mark.parametrize("title", ["A random number", None])
@pytest.mark.parametrize("arg", ["synthetic", "methow"])
def test_link_array(arg, title, request):
nst = request.getfixturevalue(arg)
grid = nst.grid
parcels = nst._parcels
random_link = np.random.randn(grid.size("link"))
opts = {
"link_attribute": random_link, # use an array of size link.
"network_cmap": "jet", # change colormap
"network_norm": network_norm, # and normalize
"link_attribute_title": title,
"parcel_alpha": 0,
"network_linewidth": 3,
}
plot_network_and_parcels(grid, parcels, parcel_time_index=0, **opts)
@pytest.mark.parametrize("arg", ["synthetic", "methow"])
def test_with_filter(arg, request):
nst = request.getfixturevalue(arg)
grid = nst.grid
parcels = nst._parcels
parcel_filter = np.zeros((parcels.dataset.dims["item_id"]), dtype=bool)
parcel_filter[::10] = True
plot_network_and_parcels(
grid,
parcels,
parcel_time_index=0,
parcel_filter=parcel_filter,
link_attribute="sediment_total_volume",
network_norm=network_norm,
parcel_alpha=1.0,
parcel_size_attribute="D",
parcel_color_attribute="D",
parcel_color_norm=parcel_color_norm2,
parcel_size_norm=parcel_size_norm,
parcel_size_attribute_title="D",
)
def test_double_network_color(synthetic):
grid = synthetic.grid
parcels = synthetic._parcels
with pytest.raises(ValueError):
plot_network_and_parcels(
grid, parcels, link_attribute="sediment_total_volume", network_color="r"
)
def test_double_parcel_color(synthetic):
grid = synthetic.grid
parcels = synthetic._parcels
with pytest.raises(ValueError):
plot_network_and_parcels(
grid, parcels, parcel_color_attribute="D", parcel_color="r"
)
def test_double_parcel_size(synthetic):
grid = synthetic.grid
parcels = synthetic._parcels
with pytest.raises(ValueError):
plot_network_and_parcels(
grid, parcels, parcel_size_attribute="D", parcel_size=3
)
def test_categorical_parcel_color(synthetic):
grid = synthetic.grid
parcels = synthetic._parcels
"quartzite"
with pytest.raises(ValueError):
plot_network_and_parcels(grid, parcels, parcel_color_attribute="quartzite")
def test_categorical_parcel_size(synthetic):
grid = synthetic.grid
parcels = synthetic._parcels
"quartzite"
with pytest.raises(ValueError):
plot_network_and_parcels(grid, parcels, parcel_size_attribute="quartzite")
def test_missing_parcel_color(synthetic):
grid = synthetic.grid
parcels = synthetic._parcels
"quartzite"
with pytest.raises(ValueError):
plot_network_and_parcels(grid, parcels, parcel_color_attribute="not_here")
def test_missing_parcel_size(synthetic):
grid = synthetic.grid
parcels = synthetic._parcels
"quartzite"
with pytest.raises(ValueError):
plot_network_and_parcels(grid, parcels, parcel_size_attribute="not_here")
| mit |
TueVJ/OptFinFinalExam | clustering/ClusterETFs.py | 1 | 6108 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pandas.io.data as web
import scipy.cluster.hierarchy as hier
import seaborn as sns
from collections import defaultdict
from matplotlib.ticker import FuncFormatter
from sklearn.decomposition import PCA
def percentformatter(x, pos=0):
return "{:.0f}%".format(100 * x)
def label_point(x, y, val, ax):
# http://stackoverflow.com/questions/15910019/
a = pd.DataFrame({'x': x, 'y': y, 'val': val})
offsetx = (a.x.max() - a.x.min()) * 0.02
offsety = (a.y.max() - a.y.min()) * 0.02
for i, point in a.iterrows():
ax.text(
point['x'] + offsetx,
point['y'] + offsety,
str(point['val']),
alpha=1.0 # ,
#ha='right'
)
#Set number of clusters
nclust = 15
plt.ion()
instruments = pd.read_csv('../input_data/instruments2.csv')
startdate = '2005-01-28'
enddate = '2014-11-10'
# Download the asset data if the data isn't there,
# or it doesn't match the instruments in the csv file.
try:
baseetfs = pd.read_csv('../data/base_price_data.csv')
baseetfs = baseetfs.set_index(
baseetfs.columns[0]
).convert_objects(convert_numeric=True)
baseetfs.index = baseetfs.index.to_datetime()
except IOError:
baseetfs = web.DataReader(instruments.values.flatten().tolist(), 'google', startdate, enddate)
baseetfs = baseetfs.Close.convert_objects(convert_numeric=True)
baseetfs.to_csv('../data/base_price_data.csv')
# Filter out ETFs with a low number of observations
baseetfs = baseetfs.loc[:, baseetfs.count() > 2400]
print "Using {} ETFs with more than 2400 price entries".format(len(baseetfs.columns))
# Weekly ETF prices. Missing data is filled forward.
wetfs = baseetfs.resample('W-WED', how='first', fill_method='pad')
# Build correlation matrix of weekly returns
dlogreturns = np.log(wetfs).diff()
#dlc = dlogreturns.cov()
dlc = dlogreturns.corr()
# Colormap used for clusters
cluster_cmap = sns.cubehelix_palette(
as_cmap=True,
start=0.5,
rot=2.0,
hue=1.3,
gamma=1.0,
dark=0.2,
light=0.8
)
# Get eigenvalues and eigenvectors for plotting.
pca = PCA().fit(np.nan_to_num(dlogreturns.values))
# Extract largest eigenvalue and its eigenvector
eig1, evec1 = pca.explained_variance_[0], pca.components_[0]
# Extract second-largest eigenvalue and its eigenvector
eig2, evec2 = pca.explained_variance_[1], pca.components_[1]
print 'Explained variance by component 1: {:.02f} %'.format(
pca.explained_variance_ratio_[0]*100)
print 'Explained variance by component 2: {:.02f} %'.format(
pca.explained_variance_ratio_[1]*100)
# Clustering methods:
# 'single': duv = min(d(u[i], v[j]))_ij
# 'complete': duv = max(d(u[i], v[j]))_ij
# 'average': duv = avg(d(u[i], v[j]))_ij
# 'weighted': duv = (d(s,v) + d(t,v))/2 ,
# u is formed from s,t
methods = ['single', 'complete', 'average', 'weighted']
# Labels to be plotted on projection graphs
plotted_labels = ['IAU', 'IEF']
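# The linkage input is 1 - (weekly return correlation)^2, so strongly correlated
# and strongly anti-correlated ETFs are treated as similar.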
Zs = [hier.linkage(1 - dlc.values ** 2, method=m) for m in methods]
# Create nclust clusters from the linkage matrix data
idxs = []
for i, (Z, m) in enumerate(zip(Zs, methods)):
plt.figure(1, dpi=100, figsize=(6, 4))
plt.subplot(2, 2, i)
idx = hier.fcluster(
Z, nclust,
criterion='maxclust'
)
idxs.append(idx)
#Plot dendrogram
hier.dendrogram(
Z, color_threshold=Z[-nclust+1, 2],
# labels=['']*len(dlc.index),
labels=dlc.index,
leaf_font_size=2.5)
plt.title(m)
# Construct dataframe
plotdf = pd.DataFrame(dict(
e1=evec1.dot(dlc), e2=evec2.dot(dlc),
cluster=idx, label=dlc.index
))
# Plot PCA projections
plt.figure(2, dpi=100, figsize=(6, 4))
ax = plt.subplot(2, 2, i)
plotdf.plot(
kind='scatter',
x='e1', y='e2',
c=plotdf.cluster,
cmap=cluster_cmap,
ax=ax
)
label_point(plotdf.e1, plotdf.e2, [x if x in plotted_labels else '' for x in plotdf.label], ax)
plt.xlabel(r'Projection on $PC_1$')
plt.ylabel(r'Projection on $PC_2$')
plt.title(m)
plt.ylim([plotdf.e2.min()*1.10, plotdf.e2.max()*1.10])
# Save dendrogram
plt.figure(1)
plt.tight_layout()
plt.savefig('../pic/dendro_methods.pdf')
# Save PCA projection figure
plt.figure(2)
plt.tight_layout()
plt.savefig('../pic/pca_methods.pdf')
idx = idxs[0]
clustered_etfs = defaultdict(list)
for l, c in zip(plotdf.label, plotdf.cluster):
clustered_etfs[c].append(l)
selected_etfs_mean = []
selected_etfs_std = []
clusteridx = []
for c, l in clustered_etfs.iteritems():
# Select asset with highest mean weekly return
selected_etfs_mean.append(wetfs[l].mean().idxmax())
# Select assets with lowest standard deviation
selected_etfs_std.append(wetfs[l].std().idxmin())
# Save cluster index for coloring
clusteridx.append(c*1.0/(nclust-1))
# Plot price histories of selected ETFs
plt.figure(3, dpi=100, figsize=(6, 8))
axl = plt.subplot(211)
#selected_etfs = ['IAU', 'VNQ', 'IXG']
(baseetfs/baseetfs.ix[1])[selected_etfs_mean].plot(
color=map(cluster_cmap, clusteridx),
ax=axl
)
plt.gca().yaxis.set_major_formatter(FuncFormatter(percentformatter))
plt.tight_layout()
plt.ylabel('Price index')
plt.legend(loc='upper left', ncol=2, prop={'size': 10})
plt.title('Max return')
axr = plt.subplot(212)
(baseetfs/baseetfs.ix[1])[selected_etfs_std].plot(
color=map(cluster_cmap, clusteridx),
ax=axr,
)
plt.gca().yaxis.set_major_formatter(FuncFormatter(percentformatter))
plt.title('Min stdev')
plt.legend(loc='upper left', ncol=2, prop={'size': 10})
plt.tight_layout()
plt.savefig('../pic/prices_selected_assets.pdf')
# Save output
np.savetxt('../data/etfs_max_mean.csv', selected_etfs_mean, fmt='%s')
np.savetxt('../data/etfs_min_std.csv', selected_etfs_std, fmt='%s')
wetfs[selected_etfs_mean].to_csv('../data/etfs_max_mean_prices.csv', date_format='%Y-%m-%d')
wetfs[selected_etfs_std].to_csv('../data/etfs_min_std_prices.csv', date_format='%Y-%m-%d')
np.savetxt('../data/dates.csv', wetfs.index.format(), fmt='%s')
| mit |
huzq/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 17 | 6481 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set. [1]_, [2]_
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [3]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [2] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
.. [3] P. J. Rousseeuw. Least median of squares regression. Journal of the
    American Statistical Association, 79:871, 1984.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
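        # Data are drawn from a standard normal, so the true location is 0 and the
        # true covariance is the identity; errors below are measured against these.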
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)],
color='green', ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
huongttlan/statsmodels | statsmodels/tools/tests/test_tools.py | 26 | 18818 | """
Test functions for models.tools
"""
from statsmodels.compat.python import lrange, range
import numpy as np
from numpy.random import standard_normal
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_string_equal, TestCase)
from nose.tools import (assert_true, assert_false, assert_raises)
from statsmodels.datasets import longley
from statsmodels.tools import tools
from statsmodels.tools.tools import pinv_extended
from statsmodels.compat.numpy import np_matrix_rank
class TestTools(TestCase):
def test_add_constant_list(self):
x = lrange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_1d(self):
x = np.arange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_has_constant1d(self):
x = np.ones(5)
x = tools.add_constant(x, has_constant='skip')
assert_equal(x, np.ones(5))
assert_raises(ValueError, tools.add_constant, x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.ones((5, 2)))
def test_add_constant_has_constant2d(self):
x = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
y = tools.add_constant(x, has_constant='skip')
assert_equal(x, y)
assert_raises(ValueError, tools.add_constant, x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.column_stack((np.ones(4), x)))
def test_recipr(self):
X = np.array([[2,1],[-1,0]])
Y = tools.recipr(X)
assert_almost_equal(Y, np.array([[0.5,1],[0,0]]))
def test_recipr0(self):
X = np.array([[2,1],[-4,0]])
Y = tools.recipr0(X)
assert_almost_equal(Y, np.array([[0.5,1],[-0.25,0]]))
def test_rank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
self.assertEquals(tools.rank(X), np_matrix_rank(X))
X[:,0] = X[:,1] + X[:,2]
self.assertEquals(tools.rank(X), np_matrix_rank(X))
def test_extendedpinv(self):
X = standard_normal((40, 10))
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_extendedpinv_singular(self):
X = standard_normal((40, 10))
X[:, 5] = X[:, 1] + X[:, 3]
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_fullrank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
X[:,0] = X[:,1] + X[:,2]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,9))
self.assertEquals(tools.rank(Y), 9)
X[:,5] = X[:,3] + X[:,4]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,8))
warnings.simplefilter("ignore")
self.assertEquals(tools.rank(Y), 8)
def test_estimable():
rng = np.random.RandomState(20120713)
N, P = (40, 10)
X = rng.normal(size=(N, P))
C = rng.normal(size=(1, P))
isestimable = tools.isestimable
assert_true(isestimable(C, X))
assert_true(isestimable(np.eye(P), X))
for row in np.eye(P):
assert_true(isestimable(row, X))
X = np.ones((40, 2))
assert_true(isestimable([1, 1], X))
assert_false(isestimable([1, 0], X))
assert_false(isestimable([0, 1], X))
assert_false(isestimable(np.eye(2), X))
halfX = rng.normal(size=(N, 5))
X = np.hstack([halfX, halfX])
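    # With duplicated column blocks, contrasts on a single block are not estimable,
    # but contrasts on the sum of the two blocks are.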
assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X))
assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X))
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X))
# Test array-like for design
XL = X.tolist()
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL))
# Test ValueError for incorrect number of columns
X = rng.normal(size=(N, 5))
for n in range(1, 4):
assert_raises(ValueError, isestimable, np.ones((n,)), X)
assert_raises(ValueError, isestimable, np.eye(4), X)
class TestCategoricalNumerical(object):
#TODO: use assert_raises to check that bad inputs are taken care of
def __init__(self):
#import string
stringabc = 'abcdefghijklmnopqrstuvwxy'
self.des = np.random.randn(25,2)
self.instr = np.floor(np.arange(10,60, step=2)/10)
x=np.zeros((25,5))
x[:5,0]=1
x[5:10,1]=1
x[10:15,2]=1
x[15:20,3]=1
x[20:25,4]=1
self.dummy = x
structdes = np.zeros((25,1),dtype=[('var1', 'f4'),('var2', 'f4'),
('instrument','f4'),('str_instr','a10')])
structdes['var1'] = self.des[:,0][:,None]
structdes['var2'] = self.des[:,1][:,None]
structdes['instrument'] = self.instr[:,None]
string_var = [stringabc[0:5], stringabc[5:10],
stringabc[10:15], stringabc[15:20],
stringabc[20:25]]
string_var *= 5
self.string_var = np.array(sorted(string_var))
structdes['str_instr'] = self.string_var[:,None]
self.structdes = structdes
self.recdes = structdes.view(np.recarray)
def test_array2d(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],10)
def test_array1d(self):
des = tools.categorical(self.instr)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],6)
def test_array2d_drop(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2, drop=True)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.instr, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='instrument')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='instrument')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
# def test_arraylike2d(self):
# des = tools.categorical(self.structdes.tolist(), col=2)
# test_des = des[:,-5:]
# assert_array_equal(test_des, self.dummy)
# assert_equal(des.shape[1], 9)
# def test_arraylike1d(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr)
# test_dum = dum[:,-5:]
# assert_array_equal(test_dum, self.dummy)
# assert_equal(dum.shape[1], 6)
# def test_arraylike2d_drop(self):
# des = tools.categorical(self.structdes.tolist(), col=2, drop=True)
# test_des = des[:,-5:]
# assert_array_equal(test__des, self.dummy)
# assert_equal(des.shape[1], 8)
# def test_arraylike1d_drop(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr, drop=True)
# assert_array_equal(dum, self.dummy)
# assert_equal(dum.shape[1], 5)
class TestCategoricalString(TestCategoricalNumerical):
# comment out until we have type coercion
# def test_array2d(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],10)
# def test_array1d(self):
# des = tools.categorical(self.instr)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],6)
# def test_array2d_drop(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2, drop=True)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.string_var, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='str_instr')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='str_instr')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_arraylike2d(self):
pass
def test_arraylike1d(self):
pass
def test_arraylike2d_drop(self):
pass
def test_arraylike1d_drop(self):
pass
def test_rec_issue302():
arr = np.rec.fromrecords([[10], [11]], names='group')
actual = tools.categorical(arr)
expected = np.rec.array([(10, 1.0, 0.0), (11, 0.0, 1.0)],
dtype=[('group', int), ('group_10', float), ('group_11', float)])
assert_array_equal(actual, expected)
def test_issue302():
arr = np.rec.fromrecords([[10, 12], [11, 13]], names=['group', 'whatever'])
actual = tools.categorical(arr, col=['group'])
expected = np.rec.array([(10, 12, 1.0, 0.0), (11, 13, 0.0, 1.0)],
dtype=[('group', int), ('whatever', int), ('group_10', float),
('group_11', float)])
assert_array_equal(actual, expected)
def test_pandas_const_series():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=False)
assert_string_equal('const', series.columns[1])
assert_equal(series.var(0)[1], 0)
def test_pandas_const_series_prepend():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=True)
assert_string_equal('const', series.columns[0])
assert_equal(series.var(0)[0], 0)
def test_pandas_const_df():
dta = longley.load_pandas().exog
dta = tools.add_constant(dta, prepend=False)
assert_string_equal('const', dta.columns[-1])
assert_equal(dta.var(0)[-1], 0)
def test_pandas_const_df_prepend():
dta = longley.load_pandas().exog
# regression test for #1025
dta['UNEMP'] /= dta['UNEMP'].std()
dta = tools.add_constant(dta, prepend=True)
assert_string_equal('const', dta.columns[0])
assert_equal(dta.var(0)[0], 0)
def test_chain_dot():
A = np.arange(1,13).reshape(3,4)
B = np.arange(3,15).reshape(4,3)
C = np.arange(5,8).reshape(3,1)
assert_equal(tools.chain_dot(A,B,C), np.array([[1820],[4300],[6780]]))
class TestNanDot(object):
@classmethod
def setupClass(cls):
nan = np.nan
cls.mx_1 = np.array([[nan, 1.], [2., 3.]])
cls.mx_2 = np.array([[nan, nan], [2., 3.]])
cls.mx_3 = np.array([[0., 0.], [0., 0.]])
cls.mx_4 = np.array([[1., 0.], [1., 0.]])
cls.mx_5 = np.array([[0., 1.], [0., 1.]])
cls.mx_6 = np.array([[1., 2.], [3., 4.]])
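    # mx_1 and mx_2 carry NaNs, mx_3 is all zeros, mx_4/mx_5 have ones in a single
    # column, and mx_6 is NaN-free; the tests below check how nan_dot propagates or
    # suppresses NaNs for each pairing.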
def test_11(self):
test_res = tools.nan_dot(self.mx_1, self.mx_1)
expected_res = np.array([[ np.nan, np.nan], [ np.nan, 11.]])
assert_array_equal(test_res, expected_res)
def test_12(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_2)
expected_res = np.array([[ nan, nan], [ nan, nan]])
assert_array_equal(test_res, expected_res)
def test_13(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_14(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_41(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_4, self.mx_1)
expected_res = np.array([[ nan, 1.], [ nan, 1.]])
assert_array_equal(test_res, expected_res)
def test_23(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_32(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_3, self.mx_2)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_24(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_25(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_5)
expected_res = np.array([[ 0., nan], [ 0., 5.]])
assert_array_equal(test_res, expected_res)
def test_66(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_6, self.mx_6)
expected_res = np.array([[ 7., 10.], [ 15., 22.]])
assert_array_equal(test_res, expected_res)
| bsd-3-clause |
hyflashstar/gupiao | src/K线图.py | 1 | 1825 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 1 10:20:29 2017
@author: 53771
"""
import loadStock as ls
import tushare as ts
from matplotlib.dates import DateFormatter,WeekdayLocator,DayLocator,MONDAY,date2num
from matplotlib.finance import candlestick_ohlc
from datetime import datetime
import matplotlib.pyplot as plt
import pandas_candlestick_ohlc as pohlc
df=ls.read_hit_data('sh')
df2015=df['2012-09']
plotdat=pohlc.pandas_candlestick_ohlc(df2015)
df2012=df['2012']
Close=df2012.close
Open=df2012.open
C10p=Close-Open
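# Shape flags a strong up candle that follows a large down body two days earlier and
# a small body yesterday; the three leading zeros keep the list aligned with df2012.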
Shape=[0,0,0]
lag1C10p=C10p.shift(1)
lag2C10p=C10p.shift(2)
for i in range(3,len(C10p)):
    # the day before yesterday dropped past the 25% line, yesterday's change was in the middle band, and today's change is more than half of the day-before-yesterday's
if all([lag2C10p[i]<-11,abs(lag1C10p[i])<2,C10p[i]>6,abs(C10p[i])>abs(lag2C10p[i]*0.5)]):
Shape.append(1)
else:
Shape.append(0)
lagOpen=Open.shift(1)
lagClose=Close.shift(1)
lag2Close=Close.shift(2)
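# Doji marks days where yesterday's open and close both sit below today's open and
# below the close from two days ago, i.e. yesterday's body gapped down.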
Doji=[0,0,0]
for i in range(3,len(Open),1):
    # today's open is above yesterday's open, yesterday's open is below the previous day's close, yesterday's close is below today's open, and yesterday's close is below the previous day's close
if(all([lagOpen[i]<Open[i],lagOpen[i]<lag2Close[i],
lagClose[i]<Open[i],lagClose[i]<lag2Close[i]])):
Doji.append(1)
else:
Doji.append(0)
ret=Close/Close.shift(1)-1
lag1ret=ret.shift(1)
lag2ret=ret.shift(2)
Trend=[0,0,0]
for i in range(3,len(ret)):
#连续两个交易日下跌
if(all([lag1ret[i]<0,lag2ret[i]<0])):
Trend.append(1)
else:
Trend.append(0)
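# A signal fires only when the candle shape, the gap condition and the prior
# two-day decline all hold on the same day.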
StarSig=[]
for i in range(len(Trend)):
if all([Shape[i]==1,Doji[i]==1,Trend[i]==1]):
StarSig.append(1)
else:
StarSig.append(0)
for i in range(len(StarSig)):
if StarSig[i]==1:
print(df2012.index[i])
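# A minimal vectorized sketch of the same three checks above; the thresholds
# (-11, 2, 6) and the parameter names drop/flat/rise are illustrative assumptions,
# and NaNs produced by shift() compare as False, much like the zero padding used
# in the loops. Usage (assumption): star_signal_dates(df2012) returns the candidate
# dates as a pandas Index.
def star_signal_dates(ohlc, drop=-11, flat=2, rise=6):
    # candle body and daily return
    body = ohlc['close'] - ohlc['open']
    ret = ohlc['close'] / ohlc['close'].shift(1) - 1
    # big drop two days ago, small body yesterday, strong recovery today
    shape = ((body.shift(2) < drop) & (body.shift(1).abs() < flat)
             & (body > rise) & (body.abs() > body.shift(2).abs() * 0.5))
    # yesterday's open and close both below today's open and the close two days ago
    doji = ((ohlc['open'].shift(1) < ohlc['open'])
            & (ohlc['open'].shift(1) < ohlc['close'].shift(2))
            & (ohlc['close'].shift(1) < ohlc['open'])
            & (ohlc['close'].shift(1) < ohlc['close'].shift(2)))
    # two consecutive down days before the signal
    trend = (ret.shift(1) < 0) & (ret.shift(2) < 0)
    return ohlc.index[shape & doji & trend]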
| apache-2.0 |
huzq/scikit-learn | sklearn/tree/tests/test_reingold_tilford.py | 15 | 1763 | import numpy as np
import pytest
from sklearn.tree._reingold_tilford import buchheim, Tree
simple_tree = Tree("", 0,
Tree("", 1),
Tree("", 2))
bigger_tree = Tree("", 0,
Tree("", 1,
Tree("", 3),
Tree("", 4,
Tree("", 7),
Tree("", 8)
),
),
Tree("", 2,
Tree("", 5),
Tree("", 6)
)
)
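# Both fixtures are binary trees, so buchheim() should centre every parent exactly
# above the midpoint of its two children; walk_tree() below asserts this together
# with the one-level depth increase per generation.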
@pytest.mark.parametrize("tree, n_nodes", [(simple_tree, 3), (bigger_tree, 9)])
def test_buchheim(tree, n_nodes):
def walk_tree(draw_tree):
res = [(draw_tree.x, draw_tree.y)]
for child in draw_tree.children:
# parents higher than children:
assert child.y == draw_tree.y + 1
res.extend(walk_tree(child))
if len(draw_tree.children):
# these trees are always binary
# parents are centered above children
assert draw_tree.x == (draw_tree.children[0].x
+ draw_tree.children[1].x) / 2
return res
layout = buchheim(tree)
coordinates = walk_tree(layout)
assert len(coordinates) == n_nodes
# test that x values are unique per depth / level
    # we could also do this more quickly using defaultdicts
depth = 0
while True:
x_at_this_depth = [node[0] for node in coordinates
if node[1] == depth]
if not x_at_this_depth:
# reached all leafs
break
assert len(np.unique(x_at_this_depth)) == len(x_at_this_depth)
depth += 1
| bsd-3-clause |
alisidd/tensorflow | tensorflow/contrib/learn/__init__.py | 25 | 2458 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning.
See the @{$python/contrib.learn} guide.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNEstimator
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedEstimator
@@DNNLinearCombinedClassifier
@@DynamicRnnEstimator
@@LinearClassifier
@@LinearEstimator
@@LinearRegressor
@@LogisticRegressor
@@StateSavingRnnEstimator
@@SVM
@@SKCompat
@@Head
@@multi_class_head
@@multi_label_head
@@binary_svm_head
@@regression_head
@@poisson_regression_head
@@multi_head
@@no_op_train_fn
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.contrib.learn.python.learn import learn_runner_lib as learn_runner
from tensorflow.python.util.all_util import remove_undocumented
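# remove_undocumented() strips any module attribute that is neither referenced in the
# docstring above nor listed in _allowed_symbols.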
_allowed_symbols = ['datasets', 'head', 'io', 'learn_runner', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
MrCodeYu/spark | python/pyspark/sql/session.py | 3 | 24896 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
            yes, returns that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedContext
if session is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session.conf.set(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
if SparkSession._instantiatedContext is None:
SparkSession._instantiatedContext = self
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
        # make sure data can be consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or
:class:`pyspark.sql.types.StringType`, it must match the
real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
if schema is None:
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
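        # Rows are checked against the schema below unless verifySchema is False,
        # in which case a no-op verifier is substituted.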
verify_func = _verify_type if verifySchema else lambda _, t: True
if isinstance(schema, StructType):
def prepare(obj):
verify_func(obj, schema)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
def prepare(obj):
verify_func(obj, dataType)
return obj,
else:
if isinstance(schema, list):
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedContext = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
dalejung/trtools | trtools/io/tests/test_bundle.py | 1 | 1190 | from unittest import TestCase
import pandas as pd
import numpy as np
import trtools.io.bundle as b
import trtools.util.testing as tm
from trtools.util.tempdir import TemporaryDirectory
# panel with many items and < 10 columns
panel = pd.Panel({('item'+str(i), i, i+.0324) : tm.fake_ohlc() for i in range(5000)})
panel.items = pd.MultiIndex.from_tuples(panel.items)
assert isinstance(panel.items, pd.MultiIndex) # make this more complicated
class TestBundle(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_panel(self):
"""
Overview test that panel bundle saving works
"""
with TemporaryDirectory() as td:
b.save_panel(panel, td)
test = b.load_panel(td)
tm.assert_panel_equal(panel, test)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],exit=False)
| mit |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 46 | 13101 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features],
0), array_ops.concat([labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
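# extract() lets the model_fns below accept either plain tensors or the dict-keyed
# inputs used by the dictionary-input tests.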
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(predictions['class'],
np.argmax(predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
| apache-2.0 |
Titan-C/scikit-learn | examples/plot_compare_reduction.py | 45 | 4959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=================================================================
Selecting dimensionality reduction with Pipeline and GridSearchCV
=================================================================
This example constructs a pipeline that does dimensionality
reduction followed by prediction with a support vector
classifier. It demonstrates the use of ``GridSearchCV`` and
``Pipeline`` to optimize over different classes of estimators in a
single CV run -- unsupervised ``PCA`` and ``NMF`` dimensionality
reductions are compared to univariate feature selection during
the grid search.
Additionally, ``Pipeline`` can be instantiated with the ``memory``
argument to memoize the transformers within the pipeline, avoiding the need to
fit the same transformers over and over.
Note that the use of ``memory`` to enable caching becomes interesting when the
fitting of a transformer is costly.
"""
###############################################################################
# Illustration of ``Pipeline`` and ``GridSearchCV``
###############################################################################
# This section illustrates the use of a ``Pipeline`` with
# ``GridSearchCV``
# Authors: Robert McGibbon, Joel Nothman, Guillaume Lemaitre
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
print(__doc__)
pipe = Pipeline([
('reduce_dim', PCA()),
('classify', LinearSVC())
])
N_FEATURES_OPTIONS = [2, 4, 8]
C_OPTIONS = [1, 10, 100, 1000]
param_grid = [
{
'reduce_dim': [PCA(iterated_power=7), NMF()],
'reduce_dim__n_components': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
{
'reduce_dim': [SelectKBest(chi2)],
'reduce_dim__k': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
]
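# param_grid is a list of two dicts, so GridSearchCV expands each dict separately:
# PCA/NMF are searched over n_components and SelectKBest over k, each crossed with
# the same grid of C values.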
reducer_labels = ['PCA', 'NMF', 'KBest(chi2)']
grid = GridSearchCV(pipe, cv=3, n_jobs=1, param_grid=param_grid)
digits = load_digits()
grid.fit(digits.data, digits.target)
mean_scores = np.array(grid.cv_results_['mean_test_score'])
# scores are in the order of param_grid iteration, which is alphabetical
mean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))
# select score for best C
mean_scores = mean_scores.max(axis=0)
bar_offsets = (np.arange(len(N_FEATURES_OPTIONS)) *
(len(reducer_labels) + 1) + .5)
plt.figure()
COLORS = 'bgrcmyk'
for i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):
plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])
plt.title("Comparing feature reduction techniques")
plt.xlabel('Reduced number of features')
plt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)
plt.ylabel('Digit classification accuracy')
plt.ylim((0, 1))
plt.legend(loc='upper left')
###############################################################################
# Caching transformers within a ``Pipeline``
###############################################################################
# It is sometimes worthwhile storing the state of a specific transformer
# since it could be used again. Using a pipeline in ``GridSearchCV`` triggers
# such situations. Therefore, we use the argument ``memory`` to enable caching.
#
# .. warning::
# Note that this example is, however, only an illustration since for this
# specific case fitting PCA is not necessarily slower than loading the
# cache. Hence, use the ``memory`` constructor parameter when the fitting
# of a transformer is costly.
from tempfile import mkdtemp
from shutil import rmtree
from sklearn.externals.joblib import Memory
# Create a temporary folder to store the transformers of the pipeline
cachedir = mkdtemp()
memory = Memory(cachedir=cachedir, verbose=10)
cached_pipe = Pipeline([('reduce_dim', PCA()),
('classify', LinearSVC())],
memory=memory)
# This time, a cached pipeline will be used within the grid search
grid = GridSearchCV(cached_pipe, cv=3, n_jobs=1, param_grid=param_grid)
digits = load_digits()
grid.fit(digits.data, digits.target)
# Delete the temporary cache before exiting
rmtree(cachedir)
###############################################################################
# The ``PCA`` fitting is only computed at the evaluation of the first
# configuration of the ``C`` parameter of the ``LinearSVC`` classifier. The
# other configurations of ``C`` will trigger the loading of the cached ``PCA``
# estimator data, saving processing time. Therefore, the use of
# caching the pipeline using ``memory`` is highly beneficial when fitting
# a transformer is costly.
plt.show()
| bsd-3-clause |
Duke-NSOE/GeoHAT | GeoHat_V10/Scripts/networkx/drawing/nx_pylab.py | 10 | 27884 | """
**********
Matplotlib
**********
Draw networks with matplotlib (pylab).
See Also
--------
matplotlib: http://matplotlib.sourceforge.net/
pygraphviz: http://networkx.lanl.gov/pygraphviz/
"""
# Copyright (C) 2004-2012 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__author__ = """Aric Hagberg ([email protected])"""
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell',
'draw_graphviz']
def draw(G, pos=None, ax=None, hold=None, **kwds):
"""Draw the graph G with Matplotlib (pylab).
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
hold : bool, optional
Set the Matplotlib hold state. If True subsequent draw
commands will be added to the current axes.
**kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
Good alternatives are:
With pylab:
>>> import pylab as P #
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> P.draw() # pylab draw()
With pyplot
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
"""
try:
import matplotlib.pylab as pylab
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
cf=pylab.gcf()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax=cf.add_axes((0,0,1,1))
else:
ax=cf.gca()
# allow callers to override the hold state by passing hold=True|False
b = pylab.ishold()
h = kwds.pop('hold', None)
if h is not None:
pylab.hold(h)
try:
draw_networkx(G,pos=pos,ax=ax,**kwds)
ax.set_axis_off()
pylab.draw_if_interactive()
except:
pylab.hold(b)
raise
pylab.hold(b)
return
def draw_networkx(G, pos=None, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
edge_color : color string, or array of floats (default='r')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
    edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
    style : string, optional (default='solid')
Edge line style (solid|dashed|dotted,dashdot)
    labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
label : string, optional
Label for graph legend
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import pylab
    >>> limits=pylab.axis('off') # turn off axis
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos=nx.drawing.spring_layout(G) # default to spring layout
node_collection=draw_networkx_nodes(G, pos, **kwds)
edge_collection=draw_networkx_edges(G, pos, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
pylab.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
label = None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
       Line width of symbol border (default=1.0)
label : [None| string]
Label for legend
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
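    A further sketch mapping numeric node values to colors via the
    ``vmin``/``vmax`` scaling described above (values are illustrative only):
    >>> vals = [float(i) for i in range(len(G))]
    >>> nodes = nx.draw_networkx_nodes(G, pos=nx.spring_layout(G),
    ...                                node_color=vals, vmin=0.0,
    ...                                vmax=float(len(G)))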
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if nodelist is None:
nodelist=G.nodes()
if not nodelist or len(nodelist)==0: # empty nodelist, no drawing
return None
try:
xy=numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
node_collection=ax.scatter(xy[:,0], xy[:,1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths,
label=label)
# pylab.axes(ax)
pylab.sci(node_collection)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=None,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
label=None,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
edgelist : collection of edge tuples
       Draw only specified edges (default=G.edges())
    width : float
       Line width of edges (default=1.0)
    edge_color : color string, or array of floats
       Edge color. Can be a single color format string (default='k'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
Edge line style (default='solid') (solid|dashed|dotted,dashdot)
alpha : float
The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
label : [None| string]
Label for legend
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
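    A further sketch drawing only a subset of edges with a custom width and
    line style (values are illustrative only):
    >>> pos = nx.spring_layout(G)
    >>> some_edges = list(G.edges())[:5]
    >>> ec = nx.draw_networkx_edges(G, pos, edgelist=some_edges, width=2.0,
    ...                             style='dashed')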
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter,Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if edgelist is None:
edgelist=G.edges()
if not edgelist or len(edgelist)==0: # no edges!
return None
# set edge positions
edge_pos=numpy.asarray([(pos[e[0]],pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color)==len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c,alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3,4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color)==1:
edge_colors = ( colorConverter.to_rgba(edge_color, alpha), )
else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number of edges')
edge_collection = LineCollection(edge_pos,
colors = edge_colors,
linewidths = lw,
antialiaseds = (1,),
linestyle = style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
pylab.sci(edge_collection)
arrow_collection=None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos=[]
p=1.0-0.25 # make head segment 25 percent of edge length
for src,dst in edge_pos:
x1,y1=src
x2,y2=dst
dx=x2-x1 # x offset
dy=y2-y1 # y offset
d=numpy.sqrt(float(dx**2+dy**2)) # length of edge
if d==0: # source and target at same position
continue
if dx==0: # vertical edge
xa=x2
ya=dy*p+y1
if dy==0: # horizontal edge
ya=y2
xa=dx*p+x1
else:
theta=numpy.arctan2(dy,dx)
xa=p*d*numpy.cos(theta)+x1
ya=p*d*numpy.sin(theta)+y1
a_pos.append(((xa,ya),(x2,y2)))
arrow_collection = LineCollection(a_pos,
colors = arrow_colors,
linewidths = [4*ww for ww in lw],
antialiaseds = (1,),
transOffset = ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
arrow_collection.set_label(label)
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:,:,0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:,:,0]))
miny = numpy.amin(numpy.ravel(edge_pos[:,:,1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:,:,1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim( corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
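    A further sketch with an explicit ``labels`` dictionary and custom font
    settings (values are illustrative only):
    >>> my_labels = dict((n, str(n)) for n in G.nodes())
    >>> texts = nx.draw_networkx_labels(G, pos=nx.spring_layout(G),
    ...                                 labels=my_labels, font_size=8)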
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if labels is None:
labels=dict( (n,n) for n in G.nodes())
# set optional alignment
horizontalalignment=kwds.get('horizontalalignment','center')
verticalalignment=kwds.get('verticalalignment','center')
text_items={} # there is no text collection so we'll fake one
for n, label in labels.items():
(x,y)=pos[n]
if not cb.is_string_like(label):
label=str(label) # this will cause "1" and 1 to be labeled the same
t=ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform = ax.transData,
clip_on=True,
)
text_items[n]=t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
       Font size for text labels (default=10)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
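    A further sketch with labels given explicitly as a dictionary keyed by
    edge two-tuples (the label text is illustrative only):
    >>> pos = nx.spring_layout(G)
    >>> my_labels = dict(((u, v), '%d-%d' % (u, v)) for u, v in G.edges())
    >>> texts = nx.draw_networkx_edge_labels(G, pos, edge_labels=my_labels)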
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if edge_labels is None:
labels=dict( ((u,v), d) for u,v,d in G.edges(data=True) )
else:
labels = edge_labels
text_items={}
for (n1,n2), label in labels.items():
(x1,y1)=pos[n1]
(x2,y2)=pos[n2]
(x,y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle=numpy.arctan2(y2-y1,x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle-=180
if angle < - 90:
angle+=180
# transform data coordinate angle to screen coordinate angle
xy=numpy.array((x,y))
trans_angle=ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1,2)))[0]
else:
trans_angle=0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label=str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment=kwds.get('horizontalalignment','center')
verticalalignment=kwds.get('verticalalignment','center')
t=ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform = ax.transData,
bbox = bbox,
zorder = 1,
clip_on=True,
)
text_items[(n1,n2)]=t
return text_items
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout."""
draw(G,circular_layout(G),**kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout."""
draw(G,random_layout(G),**kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout."""
draw(G,spectral_layout(G),**kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout."""
draw(G,spring_layout(G),**kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout."""
nlist = kwargs.get('nlist', None)
    if nlist is not None:
del(kwargs['nlist'])
draw(G,shell_layout(G,nlist=nlist),**kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
"""Draw networkx graph with graphviz layout."""
pos=nx.drawing.graphviz_layout(G,prog)
draw(G,pos,**kwargs)
def draw_nx(G,pos,**kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G,pos,**kwds)
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS',warn=False)
import pylab
except:
raise SkipTest("matplotlib not available")
| cc0-1.0 |
kklmn/xrt | examples/withRaycing/06_AnalyzerBent1D/flux-dE.py | 1 | 6397 | # -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev"
__date__ = "14 October 2014"
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import numpy as np
#import matplotlib as mpl
import matplotlib.pyplot as plt
import xrt.backends.raycing.materials as rm
cases = []
band = 'narrow'
#band = '8e-4'
crystalMaterial = 'Si'
if crystalMaterial == 'Si':
d111 = 3.1354161
elif crystalMaterial == 'Ge':
d111 = 3.2662725
else:
raise
crystal = rm.CrystalDiamond((4, 4, 4), d111/4, elements=crystalMaterial)
thetaDegrees = [40, 60, 80]
class Case:
I0 = 1e13
def __init__(self, name, style, thetaD, dxPrime, dzPrime, N, I, dE,
Nband8e_4=0, Iband8e_4=0):
self.name = name
self.style = style
self.thetaD = thetaD
self.dxPrime = dxPrime
self.dzPrime = dzPrime
self.N = N
self.I = I # 7 lines band
self.dE = dE
self.Nband8e_4 = Nband8e_4 # 8e-4 band
self.Iband8e_4 = Iband8e_4
if thetaD == 40:
self.color = 'r'
self.alpha = 1
elif thetaD == 60:
self.color = 'b'
self.alpha = 0.5
elif thetaD == 80:
self.color = 'g'
self.alpha = 0.25
else:
raise
cases.append(self)
self.get_flux()
def get_flux(self):
if band == 'narrow':
I, N = self.I, self.N
elif band == '8e-4':
I, N = self.Iband8e_4, self.Nband8e_4
else:
raise
self.flux = (self.I0 * I / N * self.dxPrime * self.dzPrime / (4*np.pi))
# SivonHamosDiced40-det_E-7lin.png
thetaD = 40
dxP, dzP = 0.44, 0.257115043875
Case('von Hamos, diced 5 mm', 'v', thetaD, dxP, dzP, 16e8, 30337.3, 4.957,
4e8, 7589.34)
Case('von Hamos, diced 1 mm', '^', thetaD, dxP, dzP, 16e8, 29142.5, 2.745,
4e8, 7251.19)
Case('von Hamos, not diced', 'D', thetaD, dxP, dzP, 16e8, 30493.5, 1.815,
4e8, 7645.88)
dxP, dzP = 0.162100707902, 0.104196326561
Case('Johansson', '*', thetaD, dxP, dzP, 256e6, 790259, 0.286,
256e6, 408889)
Case('Johann', 'o', thetaD, dxP, dzP, 256e6, 261728, 5.722,
256e6, 383036)
dxP, dzP = 0.0707066370655, 0.0413175911167
Case('Johann as von Hamos', 's', thetaD, dxP, dzP, 400e6, 30694.2, 0.473,
4e8, 30707)
thetaD = 60
dxP, dzP = 0.44, 0.346410161514
Case('von Hamos, diced 5 mm', 'v', thetaD, dxP, dzP, 16e8, 43159.9, 2.011,
4e8, 10701)
Case('von Hamos, diced 1 mm', '^', thetaD, dxP, dzP, 16e8, 41099.2, 1.128,
4e8, 10428.9)
Case('von Hamos, not diced', 'D', thetaD, dxP, dzP, 16e8, 43410.3, 0.932,
4e8, 10854.7)
dxP, dzP = 0.117802177587, 0.102019678411
Case('Johansson', '*', thetaD, dxP, dzP, 256e6, 1.74956e6, 0.196,
256e6, 723768)
Case('Johann', 'o', thetaD, dxP, dzP, 256e6, 718938, 1.655,
256e6, 798442)
dxP, dzP = 0.0952627944163, 0.075
Case('Johann as von Hamos', 's', thetaD, dxP, dzP, 400e6, 43530.6, 0.236,
4e8, 43480.3)
thetaD = 80
dxP, dzP = 0.44, 0.393923101205
Case('von Hamos, diced 5 mm', 'v', thetaD, dxP, dzP, 16e8, 141777, 0.394,
7.5e8, 66463.3)
Case('von Hamos, diced 1 mm', '^', thetaD, dxP, dzP, 16e8, 135873, 0.275,
4e8, 34171.7)
Case('von Hamos, not diced', 'D', thetaD, dxP, dzP, 16e8, 143767, 0.257,
4e8, 35448.9)
dxP, dzP = 0.102200009788, 0.100647361997
Case('Johansson', '*', thetaD, dxP, dzP, 256e6, 2.90378e6, 0.154,
256e6, 1.10778e6)
Case('Johann', 'o', thetaD, dxP, dzP, 256e6, 2.05992e6, 0.365,
256e6, 1.12375e6)
dxP, dzP = 0.108328852831, 0.0969846310393
Case('Johann as von Hamos', 's', thetaD, dxP, dzP, 400e6, 143235, 0.086,
4e8, 143160)
block = len(cases) // 3
def plot_res_eff():
fig = plt.figure(figsize=(8, 6), dpi=72)
rect2d = [0.1, 0.1, 0.5, 0.74]
ax1 = fig.add_axes(rect2d, aspect='auto')
# ax2 = ax1.twinx()
if band == 'narrow':
bn = r'The source energy band equals $\pm 3\cdot\delta E$.'
elif band == '8e-4':
bn = r'The source energy band equals $8\cdot 10^{-4}$.'
else:
raise
title = 'Resolution-efficiency chart of 1D bent Si444 crystal analysers\n'\
+ 'at 10$^{13}$ ph/s incoming flux'\
+ u' and 100×100 µm² source size.\n' + bn
fig.text(0.5, 0.85, title, transform=fig.transFigure, size=14, color='k',
ha='center')
ax1.set_xlabel(r'resolution $\delta E$ (eV)', fontsize=14)
ax1.set_ylabel(u'flux at detector (ph/s)', fontsize=14)
# ax2.set_ylabel(u'vertical size FWHM (µm)', color='b')
# fig.subplots_adjust(right=0.88, bottom=0.12)
lines = []
labels = []
for case in cases:
l, = ax1.loglog(case.dE, case.flux, case.color+case.style,
alpha=case.alpha, ms=10)
lines.append(l)
labels.append(case.name)
for curve in range(block):
x = [case.dE for case in cases[curve::block]]
y = [case.flux for case in cases[curve::block]]
ax1.loglog(x, y, 'gray', lw=0.5)
ax1.set_xlim(0.07, None)
ax1.set_ylim(1e5, 1.1e8)
E0s = []
for thetaDegree in thetaDegrees:
theta = np.radians(thetaDegree)
sinTheta = np.sin(theta)
E0raw = rm.ch / (2 * crystal.d * sinTheta)
dTheta = crystal.get_dtheta_symmetric_Bragg(E0raw)
E0 = rm.ch / (2 * crystal.d * np.sin(theta + dTheta))
E0s.append(E0)
labelThetas = [r'{0}$^\circ,\ E\approx{1}$ eV'.format(
t, int(round(e))) for t, e in zip(thetaDegrees, E0s)]
legBragg = ax1.legend(lines[::block], labelThetas,
title='Bragg angle, Si444', numpoints=1, loc=(1, 0))
for line in legBragg.get_lines():
line._legmarker.set_marker([(4, 3), (-4, 3), (-4, -3), (4, -3)])
# line._legmarker.set_marker('.')
# raise
leg = ax1.legend(lines[:block], labels[:block], title='crystal type',
numpoints=1, loc=(1, 0.5))
for line in leg.get_lines():
line._legmarker.set_markerfacecolor('gray')
line._legmarker.set_markeredgecolor('gray')
ax1.add_artist(legBragg)
if band == 'narrow':
fname = 'ResolutionEfficiency1D-narrowBand'
elif band == '8e-4':
fname = 'ResolutionEfficiency1D-8e-4Band'
else:
raise
fig.savefig(fname+'.png')
fig.savefig(fname+'.pdf')
if __name__ == '__main__':
plot_res_eff()
plt.show()
| mit |
tornadozou/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py | 116 | 5164 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff._enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff._enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
lazywei/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
luis-rr/nest-simulator | pynest/examples/structural_plasticity.py | 13 | 13366 | # -*- coding: utf-8 -*-
#
# structural_plasticity.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Structural Plasticity example
-----------------------------
This example shows a simple network of two populations where structural
plasticity is used. The network has 1000 neurons, 80% excitatory and
20% inhibitory. The simulation starts without any connectivity. A set of
homeostatic rules are defined, according to which structural plasticity will
create and delete synapses dynamically during the simulation until a desired
level of electrical activity is reached. The model of structural plasticity
used here corresponds to the formulation presented in Butz, M., & van Ooyen, A.
(2013). A simple rule for dendritic spine and axonal bouton formation can
account for cortical reorganization after focal retinal lesions.
PLoS Comput. Biol. 9 (10), e1003259.
At the end of the simulation, a plot of the evolution of the connectivity
in the network and the average calcium concentration in the neurons is created.
'''
import nest
import numpy
import matplotlib.pyplot as pl
import sys
'''
First, we have imported all necessary modules.
'''
class StructralPlasticityExample:
def __init__(self):
'''
We define general simulation parameters
'''
# simulated time (ms)
self.t_sim = 200000.0
# simulation step (ms).
self.dt = 0.1
self.number_excitatory_neurons = 800
self.number_inhibitory_neurons = 200
# Structural_plasticity properties
self.update_interval = 1000
self.record_interval = 1000.0
# rate of background Poisson input
self.bg_rate = 10000.0
self.neuron_model = 'iaf_psc_exp'
'''
In this implementation of structural plasticity, neurons grow
connection points called synaptic elements. Synapses can be created
between compatible synaptic elements. The growth of these elements is
guided by homeostatic rules, defined as growth curves.
Here we specify the growth curves for synaptic elements of excitatory
and inhibitory neurons.
'''
# Excitatory synaptic elements of excitatory neurons
self.growth_curve_e_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.05, # Ca2+
}
# Inhibitory synaptic elements of excitatory neurons
self.growth_curve_e_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_e_e['eps'], # Ca2+
}
# Excitatory synaptic elements of inhibitory neurons
self.growth_curve_i_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0004, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.2, # Ca2+
}
# Inhibitory synaptic elements of inhibitory neurons
self.growth_curve_i_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_i_e['eps'] # Ca2+
}
'''
Now we specify the neuron model.
'''
self.model_params = {'tau_m': 10.0, # membrane time constant (ms)
# excitatory synaptic time constant (ms)
'tau_syn_ex': 0.5,
# inhibitory synaptic time constant (ms)
'tau_syn_in': 0.5,
't_ref': 2.0, # absolute refractory period (ms)
'E_L': -65.0, # resting membrane potential (mV)
'V_th': -50.0, # spike threshold (mV)
'C_m': 250.0, # membrane capacitance (pF)
'V_reset': -65.0 # reset potential (mV)
}
self.nodes_e = None
self.nodes_i = None
self.mean_ca_e = []
self.mean_ca_i = []
self.total_connections_e = []
self.total_connections_i = []
'''
We initialize variables for the post-synaptic currents of the
excitatory, inhibitory and external synapses. These values were
calculated from a PSP amplitude of 1 for excitatory synapses,
-1 for inhibitory synapses and 0.11 for external synapses.
'''
self.psc_e = 585.0
self.psc_i = -585.0
self.psc_ext = 6.2
def prepare_simulation(self):
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
'''
We set global kernel parameters. Here we define the resolution
for the simulation, which is also the time resolution for the update
of the synaptic elements.
'''
nest.SetKernelStatus(
{
'resolution': self.dt
}
)
'''
Set Structural Plasticity synaptic update interval which is how often
the connectivity will be updated inside the network. It is important
to notice that synaptic elements and connections change on different
time scales.
'''
nest.SetStructuralPlasticityStatus({
'structural_plasticity_update_interval': self.update_interval,
})
'''
Now we define Structural Plasticity synapses. In this example we create
two synapse models, one for excitatory and one for inhibitory synapses.
Then we define that excitatory synapses can only be created between a
pre synaptic element called 'Axon_ex' and a post synaptic element
called Den_ex. In a similar manner, synaptic elements for inhibitory
synapses are defined.
'''
nest.CopyModel('static_synapse', 'synapse_ex')
nest.SetDefaults('synapse_ex', {'weight': self.psc_e, 'delay': 1.0})
nest.CopyModel('static_synapse', 'synapse_in')
nest.SetDefaults('synapse_in', {'weight': self.psc_i, 'delay': 1.0})
nest.SetStructuralPlasticityStatus({
'structural_plasticity_synapses': {
'synapse_ex': {
'model': 'synapse_ex',
'post_synaptic_element': 'Den_ex',
'pre_synaptic_element': 'Axon_ex',
},
'synapse_in': {
'model': 'synapse_in',
'post_synaptic_element': 'Den_in',
'pre_synaptic_element': 'Axon_in',
},
}
})
def create_nodes(self):
'''
Now we assign the growth curves to the corresponding synaptic elements
'''
synaptic_elements = {
'Den_ex': self.growth_curve_e_e,
'Den_in': self.growth_curve_e_i,
'Axon_ex': self.growth_curve_e_e,
}
synaptic_elements_i = {
'Den_ex': self.growth_curve_i_e,
'Den_in': self.growth_curve_i_i,
'Axon_in': self.growth_curve_i_i,
}
'''
        Then it is time to create a population of excitatory neurons that
        makes up 80% of the total network size, and another population of
        inhibitory neurons that makes up the remaining 20%.
'''
self.nodes_e = nest.Create('iaf_psc_alpha',
self.number_excitatory_neurons,
{'synaptic_elements': synaptic_elements})
self.nodes_i = nest.Create('iaf_psc_alpha',
self.number_inhibitory_neurons,
{'synaptic_elements': synaptic_elements_i})
nest.SetStatus(self.nodes_e, 'synaptic_elements', synaptic_elements)
nest.SetStatus(self.nodes_i, 'synaptic_elements', synaptic_elements_i)
def connect_external_input(self):
'''
We create and connect the Poisson generator for external input
'''
noise = nest.Create('poisson_generator')
nest.SetStatus(noise, {"rate": self.bg_rate})
nest.Connect(noise, self.nodes_e, 'all_to_all',
{'weight': self.psc_ext, 'delay': 1.0})
nest.Connect(noise, self.nodes_i, 'all_to_all',
{'weight': self.psc_ext, 'delay': 1.0})
'''
    In order to save the average calcium concentration in each
population through time we create the function record_ca. Here we use the
GetStatus function to retrieve the value of Ca for every neuron in the
network and then store the average.
'''
def record_ca(self):
ca_e = nest.GetStatus(self.nodes_e, 'Ca'), # Calcium concentration
self.mean_ca_e.append(numpy.mean(ca_e))
ca_i = nest.GetStatus(self.nodes_i, 'Ca'), # Calcium concentration
self.mean_ca_i.append(numpy.mean(ca_i))
'''
In order to save the state of the connectivity in the network through time
we create the function record_connectivity. Here we use the GetStatus
function to retrieve the number of connected pre synaptic elements of each
neuron. The total amount of excitatory connections is equal to the total
amount of connected excitatory pre synaptic elements. The same applies for
inhibitory connections.
'''
def record_connectivity(self):
syn_elems_e = nest.GetStatus(self.nodes_e, 'synaptic_elements')
syn_elems_i = nest.GetStatus(self.nodes_i, 'synaptic_elements')
self.total_connections_e.append(sum(neuron['Axon_ex']['z_connected']
for neuron in syn_elems_e))
self.total_connections_i.append(sum(neuron['Axon_in']['z_connected']
for neuron in syn_elems_i))
'''
We define a function to plot the recorded values
at the end of the simulation.
'''
def plot_data(self):
fig, ax1 = pl.subplots()
ax1.axhline(self.growth_curve_e_e['eps'],
linewidth=4.0, color='#9999FF')
ax1.plot(self.mean_ca_e, 'b',
label='Ca Concentration Excitatory Neurons', linewidth=2.0)
ax1.axhline(self.growth_curve_i_e['eps'],
linewidth=4.0, color='#FF9999')
ax1.plot(self.mean_ca_i, 'r',
label='Ca Concentration Inhibitory Neurons', linewidth=2.0)
ax1.set_ylim([0, 0.275])
ax1.set_xlabel("Time in [s]")
ax1.set_ylabel("Ca concentration")
ax2 = ax1.twinx()
ax2.plot(self.total_connections_e, 'm',
label='Excitatory connections', linewidth=2.0, linestyle='--')
ax2.plot(self.total_connections_i, 'k',
label='Inhibitory connections', linewidth=2.0, linestyle='--')
ax2.set_ylim([0, 2500])
ax2.set_ylabel("Connections")
ax1.legend(loc=1)
ax2.legend(loc=4)
pl.savefig('StructuralPlasticityExample.eps', format='eps')
'''
It is time to specify how we want to perform the simulation. In this
function we first enable structural plasticity in the network and then we
simulate in steps. On each step we record the calcium concentration and the
connectivity. At the end of the simulation, the plot of connections and
calcium concentration through time is generated.
'''
def simulate(self):
if nest.NumProcesses() > 1:
sys.exit("For simplicity, this example only works " +
"for a single process.")
nest.EnableStructuralPlasticity()
print("Starting simulation")
sim_steps = numpy.arange(0, self.t_sim, self.record_interval)
for i, step in enumerate(sim_steps):
nest.Simulate(self.record_interval)
self.record_ca()
self.record_connectivity()
if i % 20 == 0:
print("Progress: " + str(i / 2) + "%")
print("Simulation finished successfully")
'''
Finally we take all the functions that we have defined and create the sequence
for our example. We prepare the simulation, create the nodes for the network,
connect the external input and then simulate. Please note that as we are
simulating 200 biological seconds in this example, it will take a few minutes
to complete.
'''
if __name__ == '__main__':
example = StructralPlasticityExample()
# Prepare simulation
example.prepare_simulation()
example.create_nodes()
example.connect_external_input()
# Start simulation
example.simulate()
example.plot_data()
| gpl-2.0 |
ndingwall/scikit-learn | examples/linear_model/plot_omp.py | 21 | 2158 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx], use_line_collection=True)
# plot the noise-free reconstruction
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r], use_line_collection=True)
# plot the noisy reconstruction
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r], use_line_collection=True)
# plot the noisy reconstruction with number of non-zeros set by CV
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r], use_line_collection=True)
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
anubhav929/eden | modules/s3_update_check.py | 2 | 7813 | # -*- coding: utf-8 -*-
import os
import sys
try:
from gluon import current
except ImportError:
print >> sys.stderr, """
The installed version of Web2py is too old -- it does not define current.
Please upgrade Web2py to a more recent version.
"""
# Version of 000_config.py
# Increment this if the user should update their running instance
VERSION = 1
#def update_check(environment, template="default"):
def update_check():
"""
Check whether the dependencies are sufficient to run Eden
@ToDo: Integrate into WebSetup:
http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/WebSetup
"""
# Get Web2py environment into our globals.
#globals().update(**environment)
request = current.request
# Fatal errors
errors = []
# Non-fatal warnings
warnings = []
# -------------------------------------------------------------------------
# Check Python libraries
try:
import dateutil
except(ImportError):
errors.append("S3 unresolved dependency: dateutil required for Sahana to run")
try:
import lxml
except(ImportError):
errors.append("S3XML unresolved dependency: lxml required for Sahana to run")
try:
import shapely
except(ImportError):
warnings.append("S3GIS unresolved dependency: shapely required for GIS support")
try:
import xlrd
except(ImportError):
warnings.append("S3XLS unresolved dependency: xlrd required for XLS import")
try:
import xlwt
except(ImportError):
warnings.append("S3XLS unresolved dependency: xlwt required for XLS export")
try:
from PIL import Image
except(ImportError):
try:
import Image
except(ImportError):
warnings.append("S3PDF unresolved dependency: Python Imaging required for PDF export")
try:
import reportlab
except(ImportError):
warnings.append("S3PDF unresolved dependency: reportlab required for PDF export")
try:
import matplotlib
except(ImportError):
warnings.append("S3Chart unresolved dependency: matplotlib required for charting")
try:
import numpy
except(ImportError):
warnings.append("S3Cube unresolved dependency: numpy required for pivot table reports")
try:
import tweepy
except(ImportError):
warnings.append("S3Msg unresolved dependency: tweepy required for non-Tropo Twitter support")
try:
import PyRTF
except(ImportError):
warnings.append("Survey unresolved dependency: PyRTF required if you want to export assessment templates as a Word document")
# -------------------------------------------------------------------------
# Check Web2Py version
#
# Currently, the minimum usable Web2py is determined by whether the
# Scheduler is available
web2py_minimum_version = "Version 1.99.3 (2011-10-27 13:23:13)"
web2py_version_ok = True
try:
from gluon.fileutils import parse_version
except ImportError:
web2py_version_ok = False
if web2py_version_ok:
web2py_minimum_datetime = parse_version(web2py_minimum_version)[3]
web2py_installed_datetime = request.global_settings.web2py_version[3]
web2py_version_ok = web2py_installed_datetime >= web2py_minimum_datetime
if not web2py_version_ok:
warnings.append(
"The installed version of Web2py is too old to provide the Scheduler,"
"\nso scheduled tasks will not be available. If you need scheduled tasks,"
"\nplease upgrade Web2py to at least version: %s" % \
web2py_minimum_version)
# -------------------------------------------------------------------------
# Create required directories if needed
app_path = request.folder
databases_dir = os.path.join(app_path, "databases")
try:
os.stat(databases_dir)
except OSError:
# not found, create it
os.mkdir(databases_dir)
# -------------------------------------------------------------------------
# Copy in Templates
# - 000_config.py (machine-specific settings)
# - rest are run in-place
#
template_folder = os.path.join(app_path, "private", "templates")
template_files = {
# source : destination
"000_config.py" : os.path.join("models", "000_config.py"),
}
copied_from_template = []
for t in template_files:
src_path = os.path.join(template_folder, t)
dst_path = os.path.join(app_path, template_files[t])
try:
os.stat(dst_path)
except OSError:
# Not found, copy from template
if t == "000_config.py":
input = open(src_path)
output = open(dst_path, "w")
for line in input:
if "akeytochange" in line:
# Generate a random hmac_key to secure the passwords in case
# the database is compromised
import uuid
hmac_key = uuid.uuid4()
line = 'deployment_settings.auth.hmac_key = "%s"' % hmac_key
output.write(line)
output.close()
input.close()
else:
import shutil
shutil.copy(src_path, dst_path)
copied_from_template.append(template_files[t])
else:
# Found the file in the destination
# Check if it has been edited
import re
edited_pattern = r"FINISHED_EDITING_\w*\s*=\s*(True|False)"
edited_matcher = re.compile(edited_pattern).match
has_edited = False
with open(dst_path) as f:
for line in f:
edited_result = edited_matcher(line)
if edited_result:
has_edited = True
edited = edited_result.group(1)
break
if has_edited and (edited != "True"):
errors.append("Please edit %s before starting the system." % t)
# Check if it's up to date (i.e. a critical update requirement)
version_pattern = r"VERSION =\s*([0-9]+)"
version_matcher = re.compile(version_pattern).match
has_version = False
with open(dst_path) as f:
for line in f:
version_result = version_matcher(line)
if version_result:
has_version = True
version = version_result.group(1)
break
if not has_version:
error = "Your %s is using settings from the old templates system. Please switch to the new templates system: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Templates" % t
errors.append(error)
elif int(version) != VERSION:
error = "Your %s is using settings from template version %s. Please update with new settings from template version %s before starting the system." % \
(t, version, VERSION)
errors.append(error)
if copied_from_template:
errors.append(
"The following files were copied from templates and should be edited: %s" %
", ".join(copied_from_template))
return {"error_messages": errors, "warning_messages": warnings}
# END =========================================================================
| mit |
weidel-p/nest-simulator | pynest/examples/glif_cond_neuron.py | 5 | 9609 | # -*- coding: utf-8 -*-
#
# glif_cond_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Conductance-based generalized leaky integrate and fire (GLIF) neuron example
-----------------------------------------------------------------------------
Simple example of how to use the ``glif_cond`` neuron model for
five different levels of GLIF neurons.
Four stimulation paradigms are illustrated for the GLIF model
with externally applied current and spikes impinging.
Voltage traces, injected current traces, threshold traces, synaptic
conductance traces and spikes are shown.
KEYWORDS: glif_cond
"""
##############################################################################
# First, we import all necessary modules to simulate, analyze and plot this
# example.
import nest
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
##############################################################################
# We initialize NEST and set the simulation resolution.
nest.ResetKernel()
resolution = 0.05
nest.SetKernelStatus({"resolution": resolution})
###############################################################################
# We create the five levels of GLIF model to be tested, i.e.,
# ``lif``, ``lif_r``, ``lif_asc``, ``lif_r_asc``, ``lif_r_asc_a``.
# For each level of GLIF model, we create a ``glif_cond`` node. The node is
# created by setting relative model mechanism parameters. Other neuron
# parameters are set as default. The five ``glif_cond`` node handles are
# combined as a list. Note that the default number of synaptic ports
# is two for spike inputs. One port is excitation receptor with time
# constant being 0.2 ms and reversal potential being 0.0 mV. The other port is
# inhibition receptor with time constant being 2.0 ms and -85.0 mV.
# Note that users can set as many synaptic ports as needed for ``glif_cond``
# by setting array parameters ``tau_syn`` and ``E_rev`` of the model.
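# The following commented sketch (not executed here) illustrates how extra
# receptor ports could be declared via those array parameters; the port count,
# time constants and reversal potentials below are assumptions for
# illustration only:
#
#   nest.Create("glif_cond",
#               params={"tau_syn": [0.2, 2.0, 5.0],     # per-port time constants (ms)
#                       "E_rev": [0.0, -85.0, -70.0]})  # per-port reversal potentials (mV)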
n_lif = nest.Create("glif_cond",
params={"spike_dependent_threshold": False,
"after_spike_currents": False,
"adapting_threshold": False})
n_lif_r = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": False,
"adapting_threshold": False})
n_lif_asc = nest.Create("glif_cond",
params={"spike_dependent_threshold": False,
"after_spike_currents": True,
"adapting_threshold": False})
n_lif_r_asc = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": False})
n_lif_r_asc_a = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": True})
neurons = n_lif + n_lif_r + n_lif_asc + n_lif_r_asc + n_lif_r_asc_a
###############################################################################
# For the stimulation input to the glif_cond neurons, we create one excitation
# spike generator and one inhibition spike generator, each of which generates
# three spikes; we also create one step current generator and a Poisson
# generator, and a parrot neuron (to be paired with the Poisson generator).
# The three different injections are spread to three different time periods,
# i.e., 0 ms ~ 200 ms, 200 ms ~ 500 ms, 600 ms ~ 900 ms.
# Configuration of the current generator includes the definition of the start
# and stop times and the amplitude of the injected current. Configuration of
# the Poisson generator includes the definition of the start and stop times and
# the rate of the injected spike train.
espikes = nest.Create("spike_generator",
params={"spike_times": [10., 100., 150.],
"spike_weights": [20.]*3})
ispikes = nest.Create("spike_generator",
params={"spike_times": [15., 99., 150.],
"spike_weights": [-20.]*3})
cg = nest.Create("step_current_generator",
params={"amplitude_values": [400., ],
"amplitude_times": [200., ],
"start": 200., "stop": 500.})
pg = nest.Create("poisson_generator",
params={"rate": 15000., "start": 600., "stop": 900.})
pn = nest.Create("parrot_neuron")
###############################################################################
# The generators are then connected to the neurons. Specification of
# the ``receptor_type`` uniquely defines the target receptor.
# We connect current generator to receptor 0, the excitation spike generator
# and the Poisson generator (via parrot neuron) to receptor 1, and the
# inhibition spike generator to receptor 2 of the GLIF neurons.
# Note that Poisson generator is connected to parrot neuron to transit the
# spikes to the glif_cond neuron.
nest.Connect(cg, neurons, syn_spec={"delay": resolution})
nest.Connect(espikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 1})
nest.Connect(ispikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 2})
nest.Connect(pg, pn, syn_spec={"delay": resolution})
nest.Connect(pn, neurons, syn_spec={"delay": resolution, "receptor_type": 1})
###############################################################################
# A ``multimeter`` is created and connected to the neurons. The parameters
# specified for the multimeter include the list of quantities that should be
# recorded and the time interval at which quantities are measured.
mm = nest.Create("multimeter",
params={"interval": resolution,
"record_from": ["V_m", "I", "g_1", "g_2",
"threshold",
"threshold_spike",
"threshold_voltage",
"ASCurrents_sum"]})
nest.Connect(mm, neurons)
###############################################################################
# A ``spike_detector`` is created and connected to the neurons to record the
# spikes generated by the glif_cond neurons.
sd = nest.Create("spike_detector")
nest.Connect(neurons, sd)
###############################################################################
# Run the simulation for 1000 ms and retrieve recorded data from
# the multimeter and spike detector.
nest.Simulate(1000.)
data = mm.events
senders = data["senders"]
spike_data = sd.events
spike_senders = spike_data["senders"]
spikes = spike_data["times"]
###############################################################################
# We plot the time traces of the membrane potential (in blue) and
# the overall threshold (in green), and the spikes (as red dots) in one panel;
# the spike component of threshold (in yellow) and the voltage component of
# threshold (in black) in another panel; the injected currents (in strong blue),
# the sum of after spike currents (in cyan) in the third panel; and the synaptic
# conductances of the two receptors (in blue and orange) responding to the
# spike inputs to the neurons in the fourth panel. We plot all these four
# panels for each level of GLIF model in a separate figure.
glif_models = ["lif", "lif_r", "lif_asc", "lif_r_asc", "lif_r_asc_a"]
for i in range(len(glif_models)):
glif_model = glif_models[i]
node_id = neurons[i].global_id
plt.figure(glif_model)
gs = gridspec.GridSpec(4, 1, height_ratios=[2, 1, 1, 1])
t = data["times"][senders == 1]
ax1 = plt.subplot(gs[0])
plt.plot(t, data["V_m"][senders == node_id], "b")
plt.plot(t, data["threshold"][senders == node_id], "g--")
plt.plot(spikes[spike_senders == node_id],
[max(data["threshold"][senders == node_id]) * 0.95] *
len(spikes[spike_senders == node_id]), "r.")
plt.legend(["V_m", "threshold", "spike"])
plt.ylabel("V (mV)")
plt.title("Simulation of glif_cond neuron of " + glif_model)
ax2 = plt.subplot(gs[1])
plt.plot(t, data["threshold_spike"][senders == node_id], "y")
plt.plot(t, data["threshold_voltage"][senders == node_id], "k--")
plt.legend(["threshold_spike", "threshold_voltage"])
plt.ylabel("V (mV)")
ax3 = plt.subplot(gs[2])
plt.plot(t, data["I"][senders == node_id], "--")
plt.plot(t, data["ASCurrents_sum"][senders == node_id], "c-.")
plt.legend(["I_e", "ASCurrents_sum", "I_syn"])
plt.ylabel("I (pA)")
plt.xlabel("t (ms)")
ax4 = plt.subplot(gs[3])
plt.plot(t, data["g_1"][senders == node_id], "-")
plt.plot(t, data["g_2"][senders == node_id], "--")
plt.legend(["G_1", "G_2"])
plt.ylabel("G (nS)")
plt.xlabel("t (ms)")
plt.show()
| gpl-2.0 |
pypot/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 32 | 6044 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
| bsd-3-clause |
jonathancrabtree/biocode | sandbox/jorvis/plot_read_alignment_ratios.py | 2 | 4062 | #!/usr/bin/env python3
"""
./plot_read_alignment_ratios.py -i /usr/local/projects/aplysia/A1_CNS_reorientation.log -o /usr/local/projects/aplysia/A1_CNS_reorientation.png
Count of ratios < 0.05: 47414
Count of ratios > 0.95: 53006
./plot_read_alignment_ratios.py -i /usr/local/projects/aplysia/A3_digestive_reorientation.log -o /usr/local/projects/aplysia/A3_digestive_reorientation.png
Count of ratios < 0.05: 44087
Count of ratios > 0.95: 49084
./plot_read_alignment_ratios.py -i /usr/local/projects/aplysia/A7_heart_reorientation.log -o /usr/local/projects/aplysia/A7_heart_reorientation.png
Count of ratios < 0.05: 45995
Count of ratios > 0.95: 52188
./plot_read_alignment_ratios.py -i /usr/local/projects/aplysia/A10_gills_reorientation.log -o /usr/local/projects/aplysia/A10_gills_reorientation.png
Count of ratios < 0.05: 49941
Count of ratios > 0.95: 55683
Ranges wanted:
0.125 - 0.875
0.25 - 0.75
0.475 - 0.525
"""
import argparse
import os
import re
import biocodeutils
import matplotlib
matplotlib.use('Agg')   # select the non-interactive backend before importing pyplot
import matplotlib.pyplot as plt
import sys
def main():
    parser = argparse.ArgumentParser( description='Plot a histogram of correct read-orientation alignment ratios per transcript' )
## output file to be written
parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input file to be read' )
parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created' )
parser.add_argument('-f', '--fasta_file', type=str, required=True, help='Path to an input FASTA file' )
args = parser.parse_args()
ratios = list()
# if set to true, the IDs in the mid-range will be printed to STDOUT
print_ids = True
RATIO_MIN = 0.05
RATIO_MAX = 0.95
#RATIO_MIN = 0.125
#RATIO_MAX = 0.875
#RATIO_MIN = 0.25
#RATIO_MAX = 0.75
#RATIO_MIN = 0.475
#RATIO_MAX = 0.525
LENGTH_CUTOFF = 350
ratio_min_count = 0
ratio_bet_count = 0
ratio_max_count = 0
fasta = biocodeutils.fasta_dict_from_file( args.fasta_file )
for line in open(args.input_file):
# lines are like: comp0_c0_seq1 1-T:6 1-F:0 2-T:0 2-F:5
m = re.search('(.+)\t1-T:(\d+)\t1-F:(\d+)\t2-T:(\d+)\t2-F:(\d+)', line)
if m:
seq_id = m.group(1)
if seq_id in fasta:
if len(fasta[seq_id]['s']) < LENGTH_CUTOFF:
continue
else:
raise Exception("Expected but filed to find seq ID {0} in FASTA file".format(seq_id))
f_reads_correctly_mapped = int(m.group(2))
f_reads_incorrectly_mapped = int(m.group(3))
r_reads_correctly_mapped = int(m.group(5))
r_reads_incorrectly_mapped = int(m.group(4))
f_read_count = f_reads_correctly_mapped + f_reads_incorrectly_mapped
if f_read_count > 0:
correct_ratio = f_reads_correctly_mapped / f_read_count
ratios.append(correct_ratio)
if correct_ratio < RATIO_MIN:
ratio_min_count += 1
elif correct_ratio > RATIO_MAX:
ratio_max_count += 1
else:
ratio_bet_count += 1
                    if print_ids:
print(seq_id)
#print("LOG: Fcorrect:{0} Fwrong:{1} Ftotal:{2} ratio:{3}".format(f_reads_correctly_mapped, f_reads_incorrectly_mapped, f_read_count, correct_ratio))
plt.hist(ratios, bins=100)
plt.xlabel("Correct read orientation alignment ratio")
plt.ylabel("Log of transcript count")
plt.grid(True)
#plt.ylim(0,5000)
plt.gca().set_yscale("log")
plt.savefig(args.output_file)
sys.stderr.write("Count of ratios < {0}: {1}\n".format(RATIO_MIN, ratio_min_count))
sys.stderr.write("Count where {0} > ratio < {1}: {2}\n".format(RATIO_MIN, RATIO_MAX, ratio_bet_count))
sys.stderr.write("Count of ratios > {0}: {1}\n".format(RATIO_MAX, ratio_max_count))
if __name__ == '__main__':
main()
| gpl-3.0 |
FluVigilanciaBR/seasonality | methods/misc/historico_v2.py | 1 | 5930 | # coding:utf8
__author__ = 'Marcelo Ferreira da Costa Gomes'
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
dfcases = pd.read_csv('methods/clean_data/clean_data_srag_sragnofever_epiweek.csv', low_memory=False).loc[
lambda df: df.DT_SIN_PRI_epiyear == 2020,
['SG_UF_NOT',
'DT_SIN_PRI_epiweek',
'DT_DIGITA',
'SinPri2Digita_DelayWeeks']]
dfcases.rename(columns={'SG_UF_NOT': 'UF',
'DT_SIN_PRI_epiweek': 'epiweek',
'DT_DIGITA': 'digita',
'SinPri2Digita_DelayWeeks': 'delay'},
inplace=True)
dfdelays = dfcases[dfcases.delay <= 5].groupby(by=['UF',
'epiweek',
'delay']).size().reset_index()
dfdelays.rename(columns={0: 'SRAG'}, inplace=True)
dfdelays.UF = dfdelays.UF.astype(int).astype(str)
dftmp = dfdelays[['UF', 'epiweek', 'SRAG', 'delay']].groupby(by=['epiweek', 'delay'], as_index=False).agg(sum)
dftmp['UF'] = 'BR'
dfdelays = dfdelays.append(dftmp, ignore_index=True, sort=False)
dfpop = pd.read_csv('methods/data/populacao_uf_regional_atual.csv')
dfdelays = dfdelays.merge(dfpop[['Código', 'Total']].rename(columns={'Código': 'UF', 'Total': 'populacao'}),
on='UF',
how='left')
dfdelays.SRAG = 100000*dfdelays.SRAG.divide(dfdelays.populacao)
dfdelays.delay = dfdelays.delay.astype(int)
df = pd.read_csv('methods/clean_data/srag_sragnofever_historical_estimated_incidence.csv')[
lambda df: df.epiyear == 2020]
dfc = pd.read_csv('methods/clean_data/srag_sragnofever_current_estimated_incidence.csv')[lambda df: df.epiyear ==
2020]
df0 = df.loc[df.base_epiweek == df.epiweek, ['UF', 'epiweek', '50%', '2.5%', '97.5%']].copy()
df0 = df0.merge(dfdelays.loc[dfdelays.delay == 0, ['UF', 'epiweek', 'SRAG']],
on=['UF', 'epiweek'],
how='left').fillna(0, axis=1)
df0['SRAG_d0'] = df0['SRAG']
for i in range(1, 5):
df0 = df0.merge(dfdelays.loc[dfdelays.delay == i, ['UF', 'epiweek', 'SRAG']],
on=['UF', 'epiweek'], suffixes=('', '_d%s' % i), how='left').fillna(0, axis=1)
df0['SRAG_d%s' % i] += df0['SRAG_d%s' % (i-1)]
df0 = df0.merge(dfc[['UF', 'epiweek', 'SRAG']], how='left', on=['UF', 'epiweek'], suffixes=('', '_c'))
df0 = df0.merge(df.loc[(df.base_epiweek - 1) == df.epiweek, ['UF', 'epiweek', '50%', '2.5%', '97.5%']],
on=['UF', 'epiweek'], suffixes=('', '_d%s' % 1), how='left')
# Plot
dfid = pd.read_csv('methods/report/territorios.csv')
for uf in df0.UF.unique():
plt.close('all')
fig = plt.figure(figsize=[8, 6], dpi=100)
ax = fig.add_axes([.07,.08,.92,.85])
df0[df0.UF == uf].plot(x='epiweek',
y='SRAG',
style=':',
ax=ax,
lw=2,
label='Reportado ao fim da semana')
d = 1
df0[df0.UF == uf].plot(x='epiweek',
y='SRAG_d%s' % d,
style=':',
lw=2,
ax=ax,
label='%s semana depois' %d)
for i in range(2, 5):
df0[df0.UF == uf].plot(x='epiweek',
y='SRAG_d%s' % i,
style=':',
ax=ax,
label='%s semanas depois' % i,
lw=2)
df0[df0.UF == uf].plot(x='epiweek',
y='SRAG_c',
style='-',
ax=ax,
color='black',
label='Dado mais recente',
lw=2)
ax.set_prop_cycle(None)
color_cycle = ax._get_lines.prop_cycler
color_shade = next(color_cycle)['color']
df0[df0.UF == uf].plot(x='epiweek',
y='50%',
style='--',
ax=ax,
color=color_shade,
label='Estimativa ao fim da semana',
lw=2)
ax.fill_between(df0.epiweek[df0.UF == uf],
df0.loc[df0.UF == uf, '2.5%'],
df0.loc[df0.UF == uf, '97.5%'],
color=color_shade, alpha=.2, label='IC 95% ao fim da semana')
color_shade = next(color_cycle)['color']
df0[df0.UF == uf].plot(x='epiweek',
y='50%_d1',
style='--',
ax=ax,
color=color_shade,
label='Estimativa uma semana depois',
lw=2)
ax.fill_between(df0.epiweek[df0.UF == uf],
df0.loc[df0.UF == uf, '2.5%_d1'],
df0.loc[df0.UF == uf, '97.5%_d1'],
color=color_shade, alpha=.2, label='IC 95% uma semana depois')
ax.set_xlabel('Semana de primeiros sintomas', fontsize='x-large', fontfamily='Roboto')
ax.set_ylabel('Incidência por 100mil hab.', fontsize='x-large', fontfamily='Roboto')
ax.legend(loc='upper left',
fontsize='medium')
ax.set_xticks([1] + list(range(4, 37, 4)))
ax.set_xticklabels([1] + list(range(4, 37, 4)))
plt.xlim([1, 37])
lbl = dfid.sigla[dfid.UF == uf].values[0]
plt.suptitle('SRAG no %s\n-- dados inseridos até a semana 36 --' % lbl, y=.99, fontsize='x-large', fontfamily='Roboto')
plt.xticks(fontfamily='Roboto', fontsize='large')
plt.yticks(fontfamily='Roboto', fontsize='large')
plt.savefig('methods/misc/comparacao/comparacao_%s.png' % lbl)
plt.close('all')
| gpl-3.0 |
ctk3b/mdtraj | mdtraj/core/trajectory.py | 1 | 78960 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Kyle A. Beauchamp, TJ Lane, Joshua Adelman, Lee-Ping Wang, Jason Swails
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import os
import warnings
from copy import deepcopy
from collections import Iterable
import numpy as np
import functools
from mdtraj.formats import DCDTrajectoryFile
from mdtraj.formats import BINPOSTrajectoryFile
from mdtraj.formats import XTCTrajectoryFile
from mdtraj.formats import TRRTrajectoryFile
from mdtraj.formats import HDF5TrajectoryFile
from mdtraj.formats import NetCDFTrajectoryFile
from mdtraj.formats import LH5TrajectoryFile
from mdtraj.formats import PDBTrajectoryFile
from mdtraj.formats import MDCRDTrajectoryFile
from mdtraj.formats import DTRTrajectoryFile
from mdtraj.formats import LAMMPSTrajectoryFile
from mdtraj.formats import XYZTrajectoryFile
from mdtraj.formats import GroTrajectoryFile
from mdtraj.formats import TNGTrajectoryFile
from mdtraj.formats import AmberNetCDFRestartFile
from mdtraj.formats import AmberRestartFile
from mdtraj.formats.prmtop import load_prmtop
from mdtraj.formats.psf import load_psf
from mdtraj.formats.mol2 import load_mol2
from mdtraj.formats.gro import load_gro
from mdtraj.formats.arc import load_arc
from mdtraj.formats.hoomdxml import load_hoomdxml
from mdtraj.core.topology import Topology
from mdtraj.core.residue_names import _SOLVENT_TYPES
from mdtraj.utils import (ensure_type, in_units_of, lengths_and_angles_to_box_vectors,
box_vectors_to_lengths_and_angles, cast_indices,
deprecated)
from mdtraj.utils.six.moves import xrange
from mdtraj.utils.six import PY3, string_types
from mdtraj import _rmsd
from mdtraj import FormatRegistry
from mdtraj.geometry import distance
from mdtraj.geometry import _geometry
##############################################################################
# Globals
##############################################################################
__all__ = ['open', 'load', 'iterload', 'load_frame', 'load_topology', 'join',
'Trajectory']
# supported extensions for constructing topologies
_TOPOLOGY_EXTS = ['.pdb', '.pdb.gz', '.h5','.lh5', '.prmtop', '.parm7',
'.psf', '.mol2', '.hoomdxml', '.gro', '.arc', '.hdf5']
##############################################################################
# Utilities
##############################################################################
def _assert_files_exist(filenames):
"""Throw an IO error if files don't exist
Parameters
----------
filenames : {str, [str]}
String or list of strings to check
"""
if isinstance(filenames, string_types):
filenames = [filenames]
for fn in filenames:
if not (os.path.exists(fn) and os.path.isfile(fn)):
raise IOError('No such file: %s' % fn)
def _assert_files_or_dirs_exist(names):
"""Throw an IO error if files don't exist
Parameters
----------
filenames : {str, [str]}
String or list of strings to check
"""
if isinstance(names, string_types):
names = [names]
for fn in names:
if not (os.path.exists(fn) and \
(os.path.isfile(fn) or os.path.isdir(fn))):
raise IOError('No such file: %s' % fn)
if PY3:
def _hash_numpy_array(x):
hash_value = hash(x.shape)
hash_value ^= hash(x.strides)
hash_value ^= hash(x.data.tobytes())
return hash_value
else:
def _hash_numpy_array(x):
writeable = x.flags.writeable
try:
x.flags.writeable = False
hash_value = hash(x.shape)
hash_value ^= hash(x.strides)
hash_value ^= hash(x.data)
finally:
x.flags.writeable = writeable
return hash_value
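# Illustrative sketch (hypothetical helper, not used elsewhere): arrays with
# identical shape, strides and bytes hash to the same value under
# _hash_numpy_array, which is the property Trajectory.__hash__ relies on.
def _example_hash_numpy_array():
    a = np.arange(6, dtype=np.float32).reshape(2, 3)
    b = a.copy()
    # Equal content, shape and strides -> equal hashes (expected: True)
    return _hash_numpy_array(a) == _hash_numpy_array(b)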
def load_topology(filename, **kwargs):
"""Load a topology
Parameters
----------
filename : str
Path to a file containing a system topology. The following extensions
are supported: '.pdb', '.pdb.gz', '.h5','.lh5', '.prmtop', '.parm7',
'.psf', '.mol2', '.hoomdxml'
Returns
-------
topology : md.Topology
"""
return _parse_topology(filename, **kwargs)
def _parse_topology(top, **kwargs):
"""Get the topology from a argument of indeterminate type
If top is a string, we try loading a pdb, if its a trajectory
we extract its topology.
Returns
-------
topology : md.Topology
"""
if isinstance(top, string_types):
ext = _get_extension(top)
else:
ext = None # might not be a string
if isinstance(top, string_types) and (ext in ['.pdb', '.pdb.gz', '.h5','.lh5']):
_traj = load_frame(top, 0, **kwargs)
topology = _traj.topology
elif isinstance(top, string_types) and (ext in ['.prmtop', '.parm7']):
topology = load_prmtop(top, **kwargs)
elif isinstance(top, string_types) and (ext in ['.psf']):
topology = load_psf(top, **kwargs)
elif isinstance(top, string_types) and (ext in ['.mol2']):
topology = load_mol2(top, **kwargs).topology
elif isinstance(top, string_types) and (ext in ['.gro']):
topology = load_gro(top, **kwargs).topology
elif isinstance(top, string_types) and (ext in ['.arc']):
topology = load_arc(top, **kwargs).topology
elif isinstance(top, string_types) and (ext in ['.hoomdxml']):
topology = load_hoomdxml(top, **kwargs).topology
elif isinstance(top, Trajectory):
topology = top.topology
elif isinstance(top, Topology):
topology = top
elif isinstance(top, string_types):
raise IOError('The topology is loaded by filename extension, and the '
'detected "%s" format is not supported. Supported topology '
'formats include %s and "%s".' % (
ext, ', '.join(['"%s"' % e for e in _TOPOLOGY_EXTS[:-1]]),
_TOPOLOGY_EXTS[-1]))
else:
raise TypeError('A topology is required. You supplied top=%s' % str(top))
return topology
def _get_extension(filename):
(base, extension) = os.path.splitext(filename)
if extension == '.gz':
extension2 = os.path.splitext(base)[1]
return extension2 + extension
return extension
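# Illustrative sketch of the double-extension handling above: '.pdb.gz' is
# reported as one extension so gzipped PDB files reach the PDB loader. The
# helper and file names are hypothetical and not used elsewhere.
def _example_get_extension():
    assert _get_extension('frame0.pdb.gz') == '.pdb.gz'
    assert _get_extension('traj.xtc') == '.xtc'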
##############################################################################
# Utilities
##############################################################################
def open(filename, mode='r', force_overwrite=True, **kwargs):
"""Open a trajectory file-like object
    This factory function returns an instance of an open file-like
object capable of reading/writing the trajectory (depending on
'mode'). It does not actually load the trajectory from disk or
write anything.
Parameters
----------
filename : str
Path to the trajectory file on disk
mode : {'r', 'w'}
The mode in which to open the file, either 'r' for read or 'w' for
write.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
Other Parameters
----------------
kwargs : dict
Other keyword parameters are passed directly to the file object
Returns
-------
fileobject : object
Open trajectory file, whose type is determined by the filename
extension
See Also
--------
load, ArcTrajectoryFile, BINPOSTrajectoryFile, DCDTrajectoryFile,
HDF5TrajectoryFile, LH5TrajectoryFile, MDCRDTrajectoryFile,
NetCDFTrajectoryFile, PDBTrajectoryFile, TRRTrajectoryFile,
XTCTrajectoryFile, TNGTrajectoryFile
"""
extension = _get_extension(filename)
try:
loader = FormatRegistry.fileobjects[extension]
except KeyError:
raise IOError('Sorry, no loader for filename=%s (extension=%s) '
'was found. I can only load files with extensions in %s'
% (filename, extension, FormatRegistry.fileobjects.keys()))
return loader(filename, mode=mode, force_overwrite=force_overwrite, **kwargs)
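# Illustrative usage sketch for the open() factory above (hypothetical helper,
# placeholder path 'traj.xtc'). For XTC files the returned reader's read()
# yields (xyz, time, step, box) arrays, so frames can be streamed without
# building a full Trajectory.
def _example_open_read(path='traj.xtc', n_frames=10):
    with open(path) as f:
        xyz, time, step, box = f.read(n_frames=n_frames)
    # xyz has shape (n_frames_read, n_atoms, 3), in nanometers for XTC
    return xyz.shape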
def load_frame(filename, index, top=None, atom_indices=None, **kwargs):
"""Load a single frame from a trajectory file
Parameters
----------
filename : str
Path to the trajectory file on disk
index : int
Load the `index`-th frame from the specified file
top : {str, Trajectory, Topology}
Most trajectory formats do not contain topology information. Pass in
either the path to a RCSB PDB file, a trajectory, or a topology to
supply this information.
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. These indices are zero-based (not 1 based, as used by the PDB
format).
Examples
--------
>>> import mdtraj as md
>>> first_frame = md.load_frame('traj.h5', 0)
>>> print first_frame
<mdtraj.Trajectory with 1 frames, 22 atoms>
See Also
--------
load, load_frame
Returns
-------
trajectory : md.Trajectory
The resulting conformation, as an md.Trajectory object containing
a single frame.
"""
extension = _get_extension(filename)
try:
loader = FormatRegistry.loaders[extension]
except KeyError:
raise IOError('Sorry, no loader for filename=%s (extension=%s) '
'was found. I can only load files with extensions in %s'
% (filename, extension, FormatRegistry.loaders.keys()))
kwargs['atom_indices'] = atom_indices
if extension not in _TOPOLOGY_EXTS:
kwargs['top'] = top
if loader.__name__ not in ['load_dtr']:
_assert_files_exist(filename)
else:
_assert_files_or_dirs_exist(filename)
return loader(filename, frame=index, **kwargs)
def load(filename_or_filenames, discard_overlapping_frames=False, **kwargs):
"""Load a trajectory from one or more files on disk.
This function dispatches to one of the specialized trajectory loaders based
on the extension on the filename. Because different trajectory formats save
different information on disk, the specific keyword argument options supported
    depend on the specific loader.
Parameters
----------
filename_or_filenames : {str, list of strings}
Filename or list of filenames containing trajectory files of a single format.
discard_overlapping_frames : bool, default=False
Look for overlapping frames between the last frame of one filename and
the first frame of a subsequent filename and discard them
Other Parameters
----------------
top : {str, Trajectory, Topology}
Most trajectory formats do not contain topology information. Pass in
either the path to a RCSB PDB file, a trajectory, or a topology to
supply this information. This option is not required for the .h5, .lh5,
and .pdb formats, which already contain topology information.
stride : int, default=None
Only read every stride-th frame
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. This may be slightly slower than the standard read because it
requires an extra copy, but will save memory.
See Also
--------
load_frame, iterload
Examples
--------
>>> import mdtraj as md
>>> traj = md.load('output.xtc', top='topology.pdb')
>>> print traj
<mdtraj.Trajectory with 500 frames, 423 atoms at 0x110740a90>
>>> traj2 = md.load('output.xtc', stride=2, top='topology.pdb')
>>> print traj2
<mdtraj.Trajectory with 250 frames, 423 atoms at 0x11136e410>
    >>> traj3 = md.load('output.xtc', atom_indices=[0,1], top='topology.pdb')
>>> print traj3
<mdtraj.Trajectory with 500 frames, 2 atoms at 0x18236e4a0>
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object.
"""
if "top" in kwargs: # If applicable, pre-loads the topology from PDB for major performance boost.
topkwargs = kwargs.copy()
topkwargs.pop("top", None)
kwargs["top"] = _parse_topology(kwargs["top"], **topkwargs)
# grab the extension of the filename
if isinstance(filename_or_filenames, string_types): # If a single filename
extension = _get_extension(filename_or_filenames)
filename = filename_or_filenames
else: # If multiple filenames, take the first one.
extensions = [_get_extension(f) for f in filename_or_filenames]
if len(set(extensions)) == 0:
raise ValueError('No trajectories specified. '
'filename_or_filenames was an empty list')
elif len(set(extensions)) > 1:
raise TypeError("Each filename must have the same extension. "
"Received: %s" % ', '.join(set(extensions)))
else:
            # we know the topology is equal because we sent the same topology
            # kwarg in. Therefore, we explicitly throw away the topology on all
            # but the first trajectory and use check_topology=False on the join.
            # Throwing the topology away explicitly allows a large number of pdb
            # files to be read in without using ridiculous amounts of memory.
trajectories = []
for (i, f) in enumerate(filename_or_filenames):
t = load(f, **kwargs)
if i != 0:
t.topology = None
trajectories.append(t)
return join(trajectories, check_topology=False,
discard_overlapping_frames=discard_overlapping_frames)
try:
#loader = _LoaderRegistry[extension][0]
loader = FormatRegistry.loaders[extension]
except KeyError:
raise IOError('Sorry, no loader for filename=%s (extension=%s) '
'was found. I can only load files '
'with extensions in %s' % (filename, extension, FormatRegistry.loaders.keys()))
if extension in _TOPOLOGY_EXTS:
# this is a little hack that makes calling load() more predictable. since
# most of the loaders take a kwargs "top" except for load_hdf5, (since
# it saves the topology inside the file), we often end up calling
# load_hdf5 via this function with the top kwarg specified. but then
# there would be a signature binding error. it's easier just to ignore
# it.
if 'top' in kwargs:
warnings.warn('top= kwarg ignored since file contains topology information')
kwargs.pop('top', None)
else:
# standard_names is a valid keyword argument only for files containing topologies
kwargs.pop('standard_names', None)
if loader.__name__ not in ['load_dtr']:
_assert_files_exist(filename_or_filenames)
else:
_assert_files_or_dirs_exist(filename_or_filenames)
value = loader(filename, **kwargs)
return value
def iterload(filename, chunk=100, **kwargs):
"""An iterator over a trajectory from one or more files on disk, in fragments
This may be more memory efficient than loading an entire trajectory at
once
Parameters
----------
filename : str
Path to the trajectory file on disk
chunk : int
Number of frames to load at once from disk per iteration. If 0, load all.
Other Parameters
----------------
top : {str, Trajectory, Topology}
Most trajectory formats do not contain topology information. Pass in
either the path to a RCSB PDB file, a trajectory, or a topology to
supply this information. This option is not required for the .h5, .lh5,
and .pdb formats, which already contain topology information.
stride : int, default=None
Only read every stride-th frame.
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. This may be slightly slower than the standard read because it
requires an extra copy, but will save memory.
skip : int, default=0
Skip first n frames.
See Also
--------
load, load_frame
Examples
--------
>>> import mdtraj as md
>>> for chunk in md.iterload('output.xtc', top='topology.pdb')
... print chunk
<mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
<mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
<mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
<mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
<mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
"""
stride = kwargs.pop('stride', 1)
atom_indices = cast_indices(kwargs.pop('atom_indices', None))
top = kwargs.pop('top', None)
skip = kwargs.pop('skip', 0)
extension = _get_extension(filename)
if extension not in _TOPOLOGY_EXTS:
topology = _parse_topology(top)
if chunk == 0:
# If chunk was 0 then we want to avoid filetype-specific code
# in case of undefined behavior in various file parsers.
# TODO: this will first apply stride, then skip!
if extension not in _TOPOLOGY_EXTS:
kwargs['top'] = top
yield load(filename, **kwargs)[skip:]
elif extension in ('.pdb', '.pdb.gz'):
        # the PDBTrajectoryFile class doesn't follow the standard API. Fixing it
# to support iterload could be worthwhile, but requires a deep refactor.
t = load(filename, stride=stride, atom_indices=atom_indices)
for i in range(0, len(t), chunk):
yield t[i:i+chunk]
else:
with (lambda x: open(x, n_atoms=topology.n_atoms)
if extension in ('.crd', '.mdcrd')
else open(filename))(filename) as f:
if skip > 0:
f.seek(skip)
while True:
if extension not in _TOPOLOGY_EXTS:
traj = f.read_as_traj(topology, n_frames=chunk*stride, stride=stride, atom_indices=atom_indices, **kwargs)
else:
traj = f.read_as_traj(n_frames=chunk*stride, stride=stride, atom_indices=atom_indices, **kwargs)
if len(traj) == 0:
                    return  # PEP 479: end the generator instead of raising StopIteration
yield traj
def join(trajs, check_topology=True, discard_overlapping_frames=False):
"""Concatenate multiple trajectories into one long trajectory
Parameters
----------
trajs : iterable of trajectories
Combine these into one trajectory
check_topology : bool
Make sure topologies match before joining
discard_overlapping_frames : bool
Check for overlapping frames and discard
"""
return functools.reduce(
lambda x, y:
x.join(y, check_topology=check_topology,
discard_overlapping_frames=discard_overlapping_frames),
trajs
)
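# Illustrative sketch (hypothetical helper, placeholder file names taken from
# the docstring examples above): read a long trajectory in chunks with
# iterload() and concatenate them back into a single Trajectory with join().
def _example_join_chunks(traj_file='output.xtc', top_file='topology.pdb'):
    chunks = list(iterload(traj_file, chunk=100, top=top_file))
    # The chunks share one topology, so the (slow) topology check can be skipped.
    return join(chunks, check_topology=False)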
class Trajectory(object):
"""Container object for a molecular dynamics trajectory
A Trajectory represents a collection of one or more molecular structures,
generally (but not necessarily) from a molecular dynamics trajectory. The
Trajectory stores a number of fields describing the system through time,
including the cartesian coordinates of each atoms (``xyz``), the topology
of the molecular system (``topology``), and information about the
unitcell if appropriate (``unitcell_vectors``, ``unitcell_length``,
``unitcell_angles``).
A Trajectory should generally be constructed by loading a file from disk.
Trajectories can be loaded from (and saved to) the PDB, XTC, TRR, DCD,
binpos, NetCDF or MDTraj HDF5 formats.
Trajectory supports fancy indexing, so you can extract one or more frames
from a Trajectory as a separate trajectory. For example, to form a
trajectory with every other frame, you can slice with ``traj[::2]``.
Trajectory uses the nanometer, degree & picosecond unit system.
Examples
--------
>>> # loading a trajectory
>>> import mdtraj as md
>>> md.load('trajectory.xtc', top='native.pdb')
<mdtraj.Trajectory with 1000 frames, 22 atoms at 0x1058a73d0>
>>> # slicing a trajectory
>>> t = md.load('trajectory.h5')
>>> print(t)
<mdtraj.Trajectory with 100 frames, 22 atoms>
>>> print(t[::2])
<mdtraj.Trajectory with 50 frames, 22 atoms>
>>> # calculating the average distance between two atoms
>>> import mdtraj as md
>>> import numpy as np
>>> t = md.load('trajectory.h5')
>>> np.mean(np.sqrt(np.sum((t.xyz[:, 0, :] - t.xyz[:, 21, :])**2, axis=1)))
See Also
--------
mdtraj.load : High-level function that loads files and returns an ``md.Trajectory``
Attributes
----------
n_frames : int
n_atoms : int
n_residues : int
time : np.ndarray, shape=(n_frames,)
timestep : float
topology : md.Topology
top : md.Topology
xyz : np.ndarray, shape=(n_frames, n_atoms, 3)
unitcell_vectors : {np.ndarray, shape=(n_frames, 3, 3), None}
unitcell_lengths : {np.ndarray, shape=(n_frames, 3), None}
unitcell_angles : {np.ndarray, shape=(n_frames, 3), None}
"""
# this is NOT configurable. if it's set to something else, things will break
# (thus why I make it private)
_distance_unit = 'nanometers'
@property
def topology(self):
"""Topology of the system, describing the organization of atoms into residues, bonds, etc
Returns
-------
topology : md.Topology
The topology object, describing the organization of atoms into
residues, bonds, etc
"""
return self._topology
@topology.setter
def topology(self, value):
"Set the topology of the system, describing the organization of atoms into residues, bonds, etc"
# todo: more typechecking
self._topology = value
@property
def n_frames(self):
"""Number of frames in the trajectory
Returns
-------
n_frames : int
The number of frames in the trajectory
"""
return self._xyz.shape[0]
@property
def n_atoms(self):
"""Number of atoms in the trajectory
Returns
-------
n_atoms : int
The number of atoms in the trajectory
"""
return self._xyz.shape[1]
@property
def n_residues(self):
"""Number of residues (amino acids) in the trajectory
Returns
-------
n_residues : int
The number of residues in the trajectory's topology
"""
if self.top is None:
return 0
return sum([1 for r in self.top.residues])
@property
def n_chains(self):
"""Number of chains in the trajectory
Returns
-------
n_chains : int
The number of chains in the trajectory's topology
"""
if self.top is None:
return 0
return sum([1 for c in self.top.chains])
@property
def top(self):
"""Alias for self.topology, describing the organization of atoms into residues, bonds, etc
Returns
-------
topology : md.Topology
The topology object, describing the organization of atoms into
residues, bonds, etc
"""
return self._topology
@top.setter
def top(self, value):
"Set the topology of the system, describing the organization of atoms into residues, bonds, etc"
# todo: more typechecking
self._topology = value
@property
def timestep(self):
"""Timestep between frames, in picoseconds
Returns
-------
timestep : float
The timestep between frames, in picoseconds.
"""
if self.n_frames <= 1:
            raise ValueError("Cannot calculate timestep if trajectory has one frame.")
return self._time[1] - self._time[0]
@property
def time(self):
"""The simulation time corresponding to each frame, in picoseconds
Returns
-------
time : np.ndarray, shape=(n_frames,)
The simulation time corresponding to each frame, in picoseconds
"""
return self._time
@time.setter
def time(self, value):
"Set the simulation time corresponding to each frame, in picoseconds"
if isinstance(value, list):
value = np.array(value)
if np.isscalar(value) and self.n_frames == 1:
value = np.array([value])
elif not value.shape == (self.n_frames,):
            raise ValueError('Wrong shape. Got %s, should be %s'
                             % (value.shape, (self.n_frames,)))
self._time = value
@property
def unitcell_vectors(self):
"""The vectors that define the shape of the unit cell in each frame
Returns
-------
vectors : np.ndarray, shape(n_frames, 3, 3)
Vectors defining the shape of the unit cell in each frame.
The semantics of this array are that the shape of the unit cell
in frame ``i`` are given by the three vectors, ``value[i, 0, :]``,
``value[i, 1, :]``, and ``value[i, 2, :]``.
"""
if self._unitcell_lengths is None or self._unitcell_angles is None:
return None
v1, v2, v3 = lengths_and_angles_to_box_vectors(
self._unitcell_lengths[:, 0], # a
self._unitcell_lengths[:, 1], # b
self._unitcell_lengths[:, 2], # c
self._unitcell_angles[:, 0], # alpha
self._unitcell_angles[:, 1], # beta
self._unitcell_angles[:, 2], # gamma
)
return np.swapaxes(np.dstack((v1, v2, v3)), 1, 2)
@unitcell_vectors.setter
def unitcell_vectors(self, vectors):
"""Set the three vectors that define the shape of the unit cell
Parameters
----------
vectors : tuple of three arrays, each of shape=(n_frames, 3)
The semantics of this array are that the shape of the unit cell
in frame ``i`` are given by the three vectors, ``value[i, 0, :]``,
``value[i, 1, :]``, and ``value[i, 2, :]``.
"""
if vectors is None or np.all(np.abs(vectors) < 1e-15):
self._unitcell_lengths = None
self._unitcell_angles = None
return
if not len(vectors) == len(self):
raise TypeError('unitcell_vectors must be the same length as '
'the trajectory. you provided %s' % str(vectors))
v1 = vectors[:, 0, :]
v2 = vectors[:, 1, :]
v3 = vectors[:, 2, :]
a, b, c, alpha, beta, gamma = box_vectors_to_lengths_and_angles(v1, v2, v3)
self._unitcell_lengths = np.vstack((a, b, c)).T
self._unitcell_angles = np.vstack((alpha, beta, gamma)).T
@property
def unitcell_volumes(self):
"""Volumes of unit cell for each frame.
Returns
-------
volumes : {np.ndarray, shape=(n_frames), None}
Volumes of the unit cell in each frame, in nanometers^3, or None
if the Trajectory contains no unitcell information.
"""
if self.unitcell_lengths is not None:
return np.array(list(map(np.linalg.det, self.unitcell_vectors)))
else:
return None
@property
def unitcell_lengths(self):
"""Lengths that define the shape of the unit cell in each frame.
Returns
-------
lengths : {np.ndarray, shape=(n_frames, 3), None}
Lengths of the unit cell in each frame, in nanometers, or None
if the Trajectory contains no unitcell information.
"""
return self._unitcell_lengths
@property
def unitcell_angles(self):
"""Angles that define the shape of the unit cell in each frame.
Returns
-------
lengths : np.ndarray, shape=(n_frames, 3)
The angles between the three unitcell vectors in each frame,
            ``alpha``, ``beta``, and ``gamma``. ``alpha`` gives the angle
between vectors ``b`` and ``c``, ``beta`` gives the angle between
vectors ``c`` and ``a``, and ``gamma`` gives the angle between
vectors ``a`` and ``b``. The angles are in degrees.
"""
return self._unitcell_angles
@unitcell_lengths.setter
def unitcell_lengths(self, value):
"""Set the lengths that define the shape of the unit cell in each frame
Parameters
----------
value : np.ndarray, shape=(n_frames, 3)
The distances ``a``, ``b``, and ``c`` that define the shape of the
unit cell in each frame, or None
"""
self._unitcell_lengths = ensure_type(value, np.float32, 2,
'unitcell_lengths', can_be_none=True, shape=(len(self), 3),
warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
@unitcell_angles.setter
def unitcell_angles(self, value):
"""Set the lengths that define the shape of the unit cell in each frame
Parameters
----------
value : np.ndarray, shape=(n_frames, 3)
The angles ``alpha``, ``beta`` and ``gamma`` that define the
shape of the unit cell in each frame. The angles should be in
degrees.
"""
self._unitcell_angles = ensure_type(value, np.float32, 2,
'unitcell_angles', can_be_none=True, shape=(len(self), 3),
warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
@property
def xyz(self):
"""Cartesian coordinates of each atom in each simulation frame
Returns
-------
xyz : np.ndarray, shape=(n_frames, n_atoms, 3)
A three dimensional numpy array, with the cartesian coordinates
of each atoms in each frame.
"""
return self._xyz
@xyz.setter
def xyz(self, value):
"Set the cartesian coordinates of each atom in each simulation frame"
if self.top is not None:
# if we have a topology and its not None
shape = (None, self.topology._numAtoms, 3)
else:
shape = (None, None, 3)
value = ensure_type(value, np.float32, 3, 'xyz', shape=shape,
warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
self._xyz = value
self._rmsd_traces = None
def _string_summary_basic(self):
"""Basic summary of traj in string form."""
unitcell_str = 'and unitcells' if self._have_unitcell else 'without unitcells'
value = "mdtraj.Trajectory with %d frames, %d atoms, %d residues, %s" % (
self.n_frames, self.n_atoms, self.n_residues, unitcell_str)
return value
def __len__(self):
return self.n_frames
def __add__(self, other):
"Concatenate two trajectories"
return self.join(other)
def __str__(self):
return "<%s>" % (self._string_summary_basic())
def __repr__(self):
return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))
def __hash__(self):
hash_value = hash(self.top)
# combine with hashes of arrays
hash_value ^= _hash_numpy_array(self._xyz)
hash_value ^= _hash_numpy_array(self.time)
hash_value ^= _hash_numpy_array(self._unitcell_lengths)
hash_value ^= _hash_numpy_array(self._unitcell_angles)
return hash_value
def __eq__(self, other):
return self.__hash__() == other.__hash__()
# def describe(self):
# """Diagnostic summary statistics on the trajectory"""
# # What information do we want to display?
# # Goals: easy to figure out if a trajectory is blowing up or contains
    #     # bad data, easy to diagnose other problems. Generally give a
    #     # high-level description of the data in the trajectory.
    #     # Possibly show std. dev. of different coordinates in the trajectory
# # or maybe its RMSD drift or something?
# # Also, check for any NaNs or Infs in the data. Or other common issues
# # like that?
# # Note that pandas.DataFrame has a describe() method, which gives
# # min/max/mean/std.dev./percentiles of each column in a DataFrame.
# raise NotImplementedError()
def superpose(self, reference, frame=0, atom_indices=None,
ref_atom_indices=None, parallel=True):
"""Superpose each conformation in this trajectory upon a reference
Parameters
----------
reference : md.Trajectory
Align self to a particular frame in `reference`
frame : int
The index of the conformation in `reference` to align to.
atom_indices : array_like, or None
The indices of the atoms to superpose. If not
supplied, all atoms will be used.
ref_atom_indices : array_like, or None
Use these atoms on the reference structure. If not supplied,
the same atom indices will be used for this trajectory and the
reference one.
parallel : bool
Use OpenMP to run the superposition in parallel over multiple cores
Returns
-------
self
"""
if atom_indices is None:
atom_indices = slice(None)
if ref_atom_indices is None:
ref_atom_indices = atom_indices
if not isinstance(ref_atom_indices, slice) and (
len(ref_atom_indices) != len(atom_indices)):
raise ValueError("Number of atoms must be consistent!")
n_frames = self.xyz.shape[0]
self_align_xyz = np.asarray(self.xyz[:, atom_indices, :], order='c')
self_displace_xyz = np.asarray(self.xyz, order='c')
ref_align_xyz = np.array(reference.xyz[frame, ref_atom_indices, :],
copy=True, order='c').reshape(1, -1, 3)
offset = np.mean(self_align_xyz, axis=1, dtype=np.float64).reshape(n_frames, 1, 3)
self_align_xyz -= offset
if self_align_xyz.ctypes.data != self_displace_xyz.ctypes.data:
# when atom_indices is None, these two arrays alias the same memory
# so we only need to do the centering once
self_displace_xyz -= offset
ref_offset = ref_align_xyz[0].astype('float64').mean(0)
ref_align_xyz[0] -= ref_offset
self_g = np.einsum('ijk,ijk->i', self_align_xyz, self_align_xyz)
ref_g = np.einsum('ijk,ijk->i', ref_align_xyz , ref_align_xyz)
_rmsd.superpose_atom_major(
ref_align_xyz, self_align_xyz, ref_g, self_g, self_displace_xyz,
0, parallel=parallel)
self_displace_xyz += ref_offset
self.xyz = self_displace_xyz
return self
def join(self, other, check_topology=True, discard_overlapping_frames=False):
"""Join two trajectories together along the time/frame axis.
This method joins trajectories along the time axis, giving a new trajectory
of length equal to the sum of the lengths of `self` and `other`.
It can also be called by using `self + other`
Parameters
----------
other : Trajectory or list of Trajectory
One or more trajectories to join with this one. These trajectories
are *appended* to the end of this trajectory.
check_topology : bool
Ensure that the topology of `self` and `other` are identical before
joining them. If false, the resulting trajectory will have the
topology of `self`.
discard_overlapping_frames : bool, optional
If True, compare coordinates at trajectory edges to discard overlapping
frames. Default: False.
See Also
--------
stack : join two trajectories along the atom axis
"""
if isinstance(other, Trajectory):
other = [other]
if isinstance(other, list):
if not all(isinstance(o, Trajectory) for o in other):
raise TypeError('You can only join Trajectory instances')
if not all(self.n_atoms == o.n_atoms for o in other):
raise ValueError('Number of atoms in self (%d) is not equal '
'to number of atoms in other' % (self.n_atoms))
if check_topology and not all(self.topology == o.topology for o in other):
raise ValueError('The topologies of the Trajectories are not the same')
if not all(self._have_unitcell == o._have_unitcell for o in other):
raise ValueError('Mixing trajectories with and without unitcell')
else:
            raise TypeError('`other` must be a list of Trajectory. You supplied %s' % type(other))
# list containing all of the trajs to merge, including self
trajectories = [self] + other
if discard_overlapping_frames:
for i in range(len(trajectories)-1):
# last frame of trajectory i
x0 = trajectories[i].xyz[-1]
# first frame of trajectory i+1
x1 = trajectories[i + 1].xyz[0]
# check that all atoms are within 2e-3 nm
# (this is kind of arbitrary)
if np.all(np.abs(x1 - x0) < 2e-3):
trajectories[i] = trajectories[i][:-1]
xyz = np.concatenate([t.xyz for t in trajectories])
time = np.concatenate([t.time for t in trajectories])
angles = lengths = None
if self._have_unitcell:
angles = np.concatenate([t.unitcell_angles for t in trajectories])
lengths = np.concatenate([t.unitcell_lengths for t in trajectories])
# use this syntax so that if you subclass Trajectory,
# the subclass's join() will return an instance of the subclass
return self.__class__(xyz, deepcopy(self._topology), time=time,
unitcell_lengths=lengths, unitcell_angles=angles)
def stack(self, other):
"""Stack two trajectories along the atom axis
This method joins trajectories along the atom axis, giving a new trajectory
with a number of atoms equal to the sum of the number of atoms in
`self` and `other`.
Notes
-----
The resulting trajectory will have the unitcell and time information
        of the left operand.
Examples
--------
>>> t1 = md.load('traj1.h5')
>>> t2 = md.load('traj2.h5')
>>> # even when t2 contains no unitcell information
>>> t2.unitcell_vectors = None
>>> stacked = t1.stack(t2)
>>> # the stacked trajectory inherits the unitcell information
>>> # from the first trajectory
>>> np.all(stacked.unitcell_vectors == t1.unitcell_vectors)
True
Parameters
----------
other : Trajectory
The other trajectory to join
See Also
--------
join : join two trajectories along the time/frame axis.
"""
if not isinstance(other, Trajectory):
raise TypeError('You can only stack two Trajectory instances')
if self.n_frames != other.n_frames:
raise ValueError('Number of frames in self (%d) is not equal '
'to number of frames in other (%d)' % (self.n_frames, other.n_frames))
if self.topology is not None:
topology = self.topology.join(other.topology)
else:
topology = None
xyz = np.hstack((self.xyz, other.xyz))
return self.__class__(xyz=xyz, topology=topology, unitcell_angles=self.unitcell_angles,
unitcell_lengths=self.unitcell_lengths, time=self.time)
def __getitem__(self, key):
"Get a slice of this trajectory"
return self.slice(key)
def slice(self, key, copy=True):
"""Slice trajectory, by extracting one or more frames into a separate object
This method can also be called using index bracket notation, i.e
`traj[1] == traj.slice(1)`
Parameters
----------
key : {int, np.ndarray, slice}
The slice to take. Can be either an int, a list of ints, or a slice
object.
copy : bool, default=True
Copy the arrays after slicing. If you set this to false, then if
you modify a slice, you'll modify the original array since they
point to the same data.
"""
xyz = self.xyz[key]
time = self.time[key]
unitcell_lengths, unitcell_angles = None, None
if self.unitcell_angles is not None:
unitcell_angles = self.unitcell_angles[key]
if self.unitcell_lengths is not None:
unitcell_lengths = self.unitcell_lengths[key]
if copy:
xyz = xyz.copy()
time = time.copy()
topology = deepcopy(self._topology)
if self.unitcell_angles is not None:
unitcell_angles = unitcell_angles.copy()
if self.unitcell_lengths is not None:
unitcell_lengths = unitcell_lengths.copy()
else:
topology = self._topology
newtraj = self.__class__(
xyz, topology, time, unitcell_lengths=unitcell_lengths,
unitcell_angles=unitcell_angles)
if self._rmsd_traces is not None:
newtraj._rmsd_traces = np.array(self._rmsd_traces[key],
ndmin=1, copy=True)
return newtraj
def __init__(self, xyz, topology, time=None, unitcell_lengths=None, unitcell_angles=None):
# install the topology into the object first, so that when setting
# the xyz, we can check that it lines up (e.g. n_atoms), with the topology
self.topology = topology
self.xyz = xyz
# _rmsd_traces are the inner product of each centered conformation,
# which are required for computing RMSD. Normally these values are
# calculated on the fly in the cython code (rmsd/_rmsd.pyx), but
        # optionally, we enable the use of precomputed values, which can speed
        # up the calculation (useful for clustering) but is potentially unsafe
        # if self._xyz is modified without a corresponding change to
        # self._rmsd_traces. This array is computed by
# center_conformations, and no other methods should really touch it.
self._rmsd_traces = None
# box has no default, it'll just be none normally
self.unitcell_lengths = unitcell_lengths
self.unitcell_angles = unitcell_angles
# time will take the default 1..N
self._time_default_to_arange = (time is None)
if time is None:
time = np.arange(len(self.xyz))
self.time = time
if (topology is not None) and (topology._numAtoms != self.n_atoms):
raise ValueError("Number of atoms in xyz (%s) and "
"in topology (%s) don't match" % (self.n_atoms, topology._numAtoms))
def openmm_positions(self, frame):
"""OpenMM-compatable positions of a single frame.
Examples
--------
>>> t = md.load('trajectory.h5')
>>> context.setPositions(t.openmm_positions(0))
Parameters
----------
frame : int
The index of frame of the trajectory that you wish to extract
Returns
-------
positions : list
The cartesian coordinates of specific trajectory frame, formatted
for input to OpenMM
"""
from simtk.openmm import Vec3
from simtk.unit import nanometer
Pos = []
for xyzi in self.xyz[frame]:
Pos.append(Vec3(xyzi[0], xyzi[1], xyzi[2]))
return Pos * nanometer
def openmm_boxes(self, frame):
"""OpenMM-compatable box vectors of a single frame.
Examples
--------
>>> t = md.load('trajectory.h5')
        >>> context.setPeriodicBoxVectors(*t.openmm_boxes(0))
Parameters
----------
frame : int
Return box for this single frame.
Returns
-------
box : tuple
The periodic box vectors for this frame, formatted for input to
OpenMM.
"""
from simtk.openmm import Vec3
from simtk.unit import nanometer
vectors = self.unitcell_vectors[frame]
if vectors is None:
raise ValueError("this trajectory does not contain box size information")
v1, v2, v3 = vectors
return (Vec3(*v1), Vec3(*v2), Vec3(*v3)) * nanometer
@staticmethod
    # I'm not really sure if the load function should be just a function or a method on the class,
    # so effectively, let's make it both?
def load(filenames, **kwargs):
"""Load a trajectory from disk
Parameters
----------
filenames : {str, [str]}
Either a string or list of strings
Other Parameters
----------------
As requested by the various load functions -- it depends on the extension
"""
return load(filenames, **kwargs)
def _savers(self):
"""Return a dictionary mapping extensions to the appropriate format-specific save function"""
return {'.xtc': self.save_xtc,
'.trr': self.save_trr,
'.pdb': self.save_pdb,
'.pdb.gz': self.save_pdb,
'.dcd': self.save_dcd,
'.h5': self.save_hdf5,
'.binpos': self.save_binpos,
'.nc': self.save_netcdf,
'.netcdf': self.save_netcdf,
'.ncrst' : self.save_netcdfrst,
'.crd': self.save_mdcrd,
'.mdcrd': self.save_mdcrd,
'.ncdf': self.save_netcdf,
'.lh5': self.save_lh5,
'.lammpstrj': self.save_lammpstrj,
'.xyz': self.save_xyz,
'.xyz.gz': self.save_xyz,
'.gro': self.save_gro,
'.rst7' : self.save_amberrst7,
'.tng' : self.save_tng,
}
def save(self, filename, **kwargs):
"""Save trajectory to disk, in a format determined by the filename extension
Parameters
----------
filename : str
filesystem path in which to save the trajectory. The extension will
be parsed and will control the format.
Other Parameters
----------------
lossy : bool
For .h5 or .lh5, whether or not to use compression.
no_models: bool
For .pdb. TODO: Document this?
force_overwrite : bool
If `filename` already exists, overwrite it.
"""
# grab the extension of the filename
extension = _get_extension(filename)
savers = self._savers()
try:
saver = savers[extension]
except KeyError:
raise IOError('Sorry, no saver for filename=%s (extension=%s) '
'was found. I can only save files '
'with extensions in %s' % (filename, extension, savers.keys()))
# run the saver, and return whatever output it gives
return saver(filename, **kwargs)
def save_hdf5(self, filename, force_overwrite=True):
"""Save trajectory to MDTraj HDF5 format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if its already there
"""
with HDF5TrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(coordinates=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit),
time=self.time,
cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit),
cell_angles=self.unitcell_angles)
f.topology = self.topology
def save_lammpstrj(self, filename, force_overwrite=True):
"""Save trajectory to LAMMPS custom dump format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if its already there
"""
with LAMMPSTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit),
cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit),
cell_angles=self.unitcell_angles)
def save_xyz(self, filename, force_overwrite=True):
"""Save trajectory to .xyz format.
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if its already there
"""
with XYZTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit),
types=[a.name for a in self.top.atoms])
def save_pdb(self, filename, force_overwrite=True, bfactors=None):
"""Save trajectory to RCSB PDB format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if its already there
bfactors : array_like, default=None, shape=(n_frames, n_atoms) or (n_atoms,)
Save bfactors with pdb file. If the array is two dimensional it should
contain a bfactor for each atom in each frame of the trajectory.
Otherwise, the same bfactor will be saved in each frame.
"""
self._check_valid_unitcell()
        if bfactors is not None:
if len(np.array(bfactors).shape) == 1:
if len(bfactors) != self.n_atoms:
raise ValueError("bfactors %s should be shaped as (n_frames, n_atoms) or (n_atoms,)" % str(np.array(bfactors).shape))
bfactors = [bfactors] * self.n_frames
else:
if np.array(bfactors).shape != (self.n_frames, self.n_atoms):
raise ValueError("bfactors %s should be shaped as (n_frames, n_atoms) or (n_atoms,)" % str(np.array(bfactors).shape))
else:
bfactors = [None] * self.n_frames
with PDBTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
for i in xrange(self.n_frames):
if self._have_unitcell:
f.write(in_units_of(self._xyz[i], Trajectory._distance_unit, f.distance_unit),
self.topology,
modelIndex=i,
bfactors=bfactors[i],
unitcell_lengths=in_units_of(self.unitcell_lengths[i], Trajectory._distance_unit, f.distance_unit),
unitcell_angles=self.unitcell_angles[i])
else:
f.write(in_units_of(self._xyz[i], Trajectory._distance_unit, f.distance_unit),
self.topology,
modelIndex=i,
bfactors=bfactors[i])
def save_xtc(self, filename, force_overwrite=True):
"""Save trajectory to Gromacs XTC format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if its already there
"""
with XTCTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit),
time=self.time,
box=in_units_of(self.unitcell_vectors, Trajectory._distance_unit, f.distance_unit))
def save_trr(self, filename, force_overwrite=True):
"""Save trajectory to Gromacs TRR format
Notes
-----
Only the xyz coordinates and the time are saved, the velocities
and forces in the trr will be zeros
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if its already there
"""
with TRRTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit),
time=self.time,
box=in_units_of(self.unitcell_vectors, Trajectory._distance_unit, f.distance_unit))
def save_dcd(self, filename, force_overwrite=True):
"""Save trajectory to CHARMM/NAMD DCD format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filenames, if its already there
"""
self._check_valid_unitcell()
with DCDTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit),
cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit),
cell_angles=self.unitcell_angles)
def save_dtr(self, filename, force_overwrite=True):
"""Save trajectory to DESMOND DTR format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if it's already there
"""
self._check_valid_unitcell()
with DTRTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit),
cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit),
cell_angles=self.unitcell_angles,
times=self.time)
def save_binpos(self, filename, force_overwrite=True):
"""Save trajectory to AMBER BINPOS format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if it's already there
"""
with BINPOSTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit))
def save_mdcrd(self, filename, force_overwrite=True):
"""Save trajectory to AMBER mdcrd format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if it's already there
"""
self._check_valid_unitcell()
if self._have_unitcell:
if not np.all(self.unitcell_angles == 90):
raise ValueError('Only rectilinear boxes can be saved to mdcrd files. '
'Your angles are {}'.format(self.unitcell_angles))
with MDCRDTrajectoryFile(filename, mode='w', force_overwrite=force_overwrite) as f:
f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit),
cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit))
def save_netcdf(self, filename, force_overwrite=True):
"""Save trajectory in AMBER NetCDF format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if it's already there
"""
self._check_valid_unitcell()
with NetCDFTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(coordinates=in_units_of(self._xyz, Trajectory._distance_unit, NetCDFTrajectoryFile.distance_unit),
time=self.time,
cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit),
cell_angles=self.unitcell_angles)
def save_netcdfrst(self, filename, force_overwrite=True):
"""Save trajectory in AMBER NetCDF restart format
Parameters
----------
filename : str
filesystem path in which to save the restart
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if it's already there
Notes
-----
NetCDF restart files can only store a single frame. If only one frame
exists, "filename" will be written. Otherwise, "filename.#" will be
written, where # is a zero-padded number from 1 to the total number of
frames in the trajectory
"""
self._check_valid_unitcell()
if self.n_frames == 1:
with AmberNetCDFRestartFile(filename, 'w', force_overwrite=force_overwrite) as f:
coordinates = in_units_of(self._xyz, Trajectory._distance_unit,
AmberNetCDFRestartFile.distance_unit)
lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit,
AmberNetCDFRestartFile.distance_unit)
f.write(coordinates=coordinates, time=self.time[0],
cell_lengths=lengths, cell_angles=self.unitcell_angles)
else:
fmt = '%s.%%0%dd' % (filename, len(str(self.n_frames)))
for i in xrange(self.n_frames):
with AmberNetCDFRestartFile(fmt % (i+1), 'w', force_overwrite=force_overwrite) as f:
coordinates = in_units_of(self._xyz, Trajectory._distance_unit,
AmberNetCDFRestartFile.distance_unit)
lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit,
AmberNetCDFRestartFile.distance_unit)
f.write(coordinates=coordinates[i], time=self.time[i],
cell_lengths=lengths[i], cell_angles=self.unitcell_angles[i])
def save_amberrst7(self, filename, force_overwrite=True):
"""Save trajectory in AMBER ASCII restart format
Parameters
----------
filename : str
filesystem path in which to save the restart
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if it's already there
Notes
-----
Amber restart files can only store a single frame. If only one frame
exists, "filename" will be written. Otherwise, "filename.#" will be
written, where # is a zero-padded number from 1 to the total number of
frames in the trajectory
"""
self._check_valid_unitcell()
if self.n_frames == 1:
with AmberRestartFile(filename, 'w', force_overwrite=force_overwrite) as f:
coordinates = in_units_of(self._xyz, Trajectory._distance_unit,
AmberRestartFile.distance_unit)
lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit,
AmberRestartFile.distance_unit)
f.write(coordinates=coordinates, time=self.time[0],
cell_lengths=lengths, cell_angles=self.unitcell_angles)
else:
fmt = '%s.%%0%dd' % (filename, len(str(self.n_frames)))
for i in xrange(self.n_frames):
with AmberRestartFile(fmt % (i+1), 'w', force_overwrite=force_overwrite) as f:
coordinates = in_units_of(self._xyz, Trajectory._distance_unit,
AmberRestartFile.distance_unit)
lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit,
AmberRestartFile.distance_unit)
f.write(coordinates=coordinates[i], time=self.time[i],
cell_lengths=lengths[i], cell_angles=self.unitcell_angles[i])
def save_lh5(self, filename, force_overwrite=True):
"""Save trajectory in deprecated MSMBuilder2 LH5 (lossy HDF5) format.
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if it's already there
"""
with LH5TrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(coordinates=self.xyz)
f.topology = self.topology
def save_gro(self, filename, force_overwrite=True, precision=3):
"""Save trajectory in Gromacs .gro format
Parameters
----------
filename : str
Path to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at that filename if it exists
precision : int, default=3
The number of decimal places to use for coordinates in GRO file
"""
self._check_valid_unitcell()
with GroTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(self.xyz, self.topology, self.time, self.unitcell_vectors,
precision=precision)
def save_tng(self, filename, force_overwrite=True):
"""Save trajectory to Gromacs TNG format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
Overwrite anything that exists at filename, if it's already there
"""
self._check_valid_unitcell()
with TNGTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(self.xyz, time=self.time, box=self.unitcell_vectors)
def center_coordinates(self, mass_weighted=False):
"""Center each trajectory frame at the origin (0,0,0).
This method acts inplace on the trajectory. The centering can
be either uniformly weighted (mass_weighted=False) or weighted by
the mass of each atom (mass_weighted=True).
Parameters
----------
mass_weighted : bool, optional (default = False)
If True, weight atoms by mass when removing COM.
Returns
-------
self
"""
if mass_weighted and self.top is not None:
self.xyz -= distance.compute_center_of_mass(self)[:, np.newaxis, :]
else:
self._rmsd_traces = _rmsd._center_inplace_atom_major(self._xyz)
return self
@deprecated('restrict_atoms was replaced by atom_slice and will be removed in 2.0')
def restrict_atoms(self, atom_indices, inplace=True):
"""Retain only a subset of the atoms in a trajectory
Deletes atoms not in `atom_indices`, and re-indexes those that remain
Parameters
----------
atom_indices : array-like, dtype=int, shape=(n_atoms)
List of atom indices to keep.
inplace : bool, default=True
If ``True``, the operation is done inplace, modifying ``self``.
Otherwise, a copy is returned with the restricted atoms, and
``self`` is not modified.
Returns
-------
traj : md.Trajectory
The return value is either ``self``, or the new trajectory,
depending on the value of ``inplace``.
"""
return self.atom_slice(atom_indices, inplace=inplace)
def atom_slice(self, atom_indices, inplace=False):
"""Create a new trajectory from a subset of atoms
Parameters
----------
atom_indices : array-like, dtype=int, shape=(n_atoms)
List of indices of atoms to retain in the new trajectory.
inplace : bool, default=False
If ``True``, the operation is done inplace, modifying ``self``.
Otherwise, a copy is returned with the sliced atoms, and
``self`` is not modified.
Returns
-------
traj : md.Trajectory
The return value is either ``self``, or the new trajectory,
depending on the value of ``inplace``.
See Also
--------
stack : stack multiple trajectories along the atom axis
"""
xyz = np.array(self.xyz[:, atom_indices], order='C')
topology = None
if self._topology is not None:
topology = self._topology.subset(atom_indices)
if inplace:
if self._topology is not None:
self._topology = topology
self._xyz = xyz
return self
unitcell_lengths = unitcell_angles = None
if self._have_unitcell:
unitcell_lengths = self._unitcell_lengths.copy()
unitcell_angles = self._unitcell_angles.copy()
time = self._time.copy()
return Trajectory(xyz=xyz, topology=topology, time=time,
unitcell_lengths=unitcell_lengths,
unitcell_angles=unitcell_angles)
def remove_solvent(self, exclude=None, inplace=False):
"""
Create a new trajectory without solvent atoms
Parameters
----------
exclude : array-like, dtype=str, shape=(n_solvent_types)
List of solvent residue names to retain in the new trajectory.
inplace : bool, default=False
The return value is either ``self``, or the new trajectory,
depending on the value of ``inplace``.
Returns
-------
traj : md.Trajectory
The return value is either ``self``, or the new trajectory,
depending on the value of ``inplace``.
"""
solvent_types = list(_SOLVENT_TYPES)
if exclude is not None:
if isinstance(exclude, str):
raise TypeError('exclude must be array-like')
if not isinstance(exclude, Iterable):
raise TypeError('exclude is not iterable')
for type in exclude:
if type not in solvent_types:
raise ValueError(type + ' is not a valid solvent type')
solvent_types.remove(type)
atom_indices = [atom.index for atom in self.topology.atoms if
atom.residue.name not in solvent_types]
return self.atom_slice(atom_indices, inplace = inplace)
def smooth(self, width, order=3, atom_indices=None, inplace=False):
"""Smoothen a trajectory using a zero-delay Buttersworth filter. Please
note that for optimal results the trajectory should be properly aligned
prior to smoothing (see `md.Trajectory.superpose`).
Parameters
----------
width : int
This acts very similarly to the window size in a moving average
smoother. In this implementation, the frequency of the low-pass
filter is taken to be two over this width, so it's like
"half the period" of the sinusiod where the filter starts
to kick in. Must be an integer greater than one.
order : int, optional, default=3
The order of the filter. A small odd number is recommended. Higher
order filters cutoff more quickly, but have worse numerical
properties.
atom_indices : array-like, dtype=int, shape=(n_atoms), default=None
List of indices of atoms to retain in the new trajectory.
Default is set to `None`, which applies smoothing to all atoms.
inplace : bool, default=False
The return value is either ``self``, or the new trajectory,
depending on the value of ``inplace``.
Returns
-------
traj : md.Trajectory
The return value is either ``self``, or the new smoothed trajectory,
depending on the value of ``inplace``.
References
----------
.. [1] "FiltFilt". Scipy Cookbook. SciPy. <http://www.scipy.org/Cookbook/FiltFilt>.
"""
from scipy.signal import lfilter, lfilter_zi, filtfilt, butter
if width < 2.0 or not isinstance(width, int):
raise ValueError('width must be an integer greater than 1.')
if not atom_indices:
atom_indices = range(self.n_atoms)
# find nearest odd integer
pad = int(np.ceil((width + 1)/2)*2 - 1)
# Use lfilter_zi to choose the initial condition of the filter.
b, a = butter(order, 2.0 / width)
zi = lfilter_zi(b, a)
xyz = self.xyz.copy()
for i in atom_indices:
for j in range(3):
signal = xyz[:, i, j]
padded = np.r_[signal[pad - 1: 0: -1], signal, signal[-1: -pad: -1]]
# Apply the filter to the padded signal.
z, _ = lfilter(b, a, padded, zi=zi*padded[0])
# Apply the filter again, to have a result filtered at an order
# the same as filtfilt.
z2, _ = lfilter(b, a, z, zi=zi*z[0])
# Use filtfilt to apply the filter.
output = filtfilt(b, a, padded)
xyz[:, i, j] = output[(pad-1): -(pad-1)]
if not inplace:
return Trajectory(xyz=xyz, topology=self.topology,
time=self.time,
unitcell_lengths=self.unitcell_lengths,
unitcell_angles=self.unitcell_angles)
self.xyz = xyz
def _check_valid_unitcell(self):
"""Do some sanity checking on self.unitcell_lengths and self.unitcell_angles
"""
if self.unitcell_lengths is not None and self.unitcell_angles is None:
raise AttributeError('unitcell length data exists, but no angles')
if self.unitcell_lengths is None and self.unitcell_angles is not None:
raise AttributeError('unitcell angles data exists, but no lengths')
if self.unitcell_lengths is not None and np.any(self.unitcell_lengths < 0):
raise ValueError('unitcell length < 0')
if self.unitcell_angles is not None and np.any(self.unitcell_angles < 0):
raise ValueError('unitcell angle < 0')
@property
def _have_unitcell(self):
return self._unitcell_lengths is not None and self._unitcell_angles is not None
def guess_anchor_molecules(self):
"""Guess anchor molecules for imaging
Returns
-------
anchor_molecules : list of atom sets
List of anchor molecules
"""
if self._topology is None:
raise ValueError('Trajectory must have a Topology that defines molecules')
molecules = self._topology.find_molecules()
# Select the anchor molecules.
molecules.sort(key=lambda x: -len(x))
atoms_cutoff = max(len(molecules[int(0.1*len(molecules))]),
int(0.1*len(molecules[0])))
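# Added worked example: for one 5000-atom protein plus 3000 three-atom
# waters, molecules[int(0.1*len(molecules))] is a water (3 atoms) and
# int(0.1*len(molecules[0])) is 500, so atoms_cutoff = 500 and only the
# protein is selected as an anchor molecule.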
anchor_molecules = [mol for mol in molecules if len(mol) > atoms_cutoff]
num_anchors = len(anchor_molecules)
if num_anchors == 0:
raise ValueError("Could not find any anchor molecules. Based on "
"our heuristic, those should be molecules with "
"more than {} atoms. Perhaps your topology "
"doesn't give an acurate bond graph?"
.format(atoms_cutoff))
return anchor_molecules
def make_molecules_whole(self, inplace=False, sorted_bonds=None):
"""Only make molecules whole
Parameters
----------
inplace : bool
If False, a new Trajectory is created and returned.
If True, this Trajectory is modified directly.
sorted_bonds : array of shape (n_bonds, 2)
Pairs of atom indices that define bonds, in sorted order.
If not specified, these will be determined from the trajectory's
topology.
See Also
--------
image_molecules()
"""
unitcell_vectors = self.unitcell_vectors
if unitcell_vectors is None:
raise ValueError('This Trajectory does not define a periodic unit cell')
if inplace:
result = self
else:
result = Trajectory(xyz=self.xyz, topology=self.topology,
time=self.time,
unitcell_lengths=self.unitcell_lengths,
unitcell_angles=self.unitcell_angles)
if sorted_bonds is None:
sorted_bonds = sorted(self._topology.bonds, key=lambda bond: bond[0].index)
sorted_bonds = np.asarray([[b0.index, b1.index] for b0, b1 in sorted_bonds])
box = np.asarray(result.unitcell_vectors, order='c')
_geometry.whole_molecules(result.xyz, box, sorted_bonds)
if not inplace:
return result
return self
def image_molecules(self, inplace=False, anchor_molecules=None, other_molecules=None, sorted_bonds=None, make_whole=True):
"""Recenter and apply periodic boundary conditions to the molecules in each frame of the trajectory.
This method is useful for visualizing a trajectory in which molecules were not wrapped
to the periodic unit cell, or in which the macromolecules are not centered with respect
to the solvent. It tries to be intelligent in deciding what molecules to center, so you
can simply call it and trust that it will "do the right thing".
Parameters
----------
inplace : bool, default=False
If False, a new Trajectory is created and returned. If True, this Trajectory
is modified directly.
anchor_molecules : list of atom sets, optional, default=None
Molecule that should be treated as an "anchor".
These molecules will be centered in the box and put near each other.
If not specified, anchor molecules are guessed using a heuristic.
other_molecules : list of atom sets, optional, default=None
Molecules that are not anchors. If not specified,
these will be molecules other than the anchor molecules
sorted_bonds : array of shape (n_bonds, 2)
Pairs of atom indices that define bonds, in sorted order.
If not specified, these will be determined from the trajectory's
topology. Only relevant if ``make_whole`` is True.
make_whole : bool
Whether to make molecules whole.
Returns
-------
traj : md.Trajectory
The return value is either ``self`` or the new trajectory,
depending on the value of ``inplace``.
See Also
--------
Trajectory.guess_anchor_molecules
"""
unitcell_vectors = self.unitcell_vectors
if unitcell_vectors is None:
raise ValueError('This Trajectory does not define a periodic unit cell')
if anchor_molecules is None:
anchor_molecules = self.guess_anchor_molecules()
if other_molecules is None:
# Determine other molecules by which molecules are not anchor molecules
molecules = self._topology.find_molecules()
other_molecules = [mol for mol in molecules if mol not in anchor_molecules]
# Expand molecules into atom indices
anchor_molecules_atom_indices = [np.fromiter((a.index for a in mol), dtype=np.int32) for mol in anchor_molecules]
other_molecules_atom_indices = [np.fromiter((a.index for a in mol), dtype=np.int32) for mol in other_molecules]
if inplace:
result = self
else:
result = Trajectory(xyz=self.xyz, topology=self.topology, time=self.time,
unitcell_lengths=self.unitcell_lengths, unitcell_angles=self.unitcell_angles)
if make_whole and sorted_bonds is None:
sorted_bonds = sorted(self._topology.bonds, key=lambda bond: bond[0].index)
sorted_bonds = np.asarray([[b0.index, b1.index] for b0, b1 in sorted_bonds])
elif not make_whole:
sorted_bonds = None
box = np.asarray(result.unitcell_vectors, order='c')
_geometry.image_molecules(result.xyz, box, anchor_molecules_atom_indices, other_molecules_atom_indices, sorted_bonds)
if not inplace:
return result
return self
| lgpl-2.1 |
ScottHull/Exoplanet-Pocketknife | old/calc_depleted_lith.py | 1 | 7067 | import os
import pandas as pd
bsp_files = [
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_bsp_compositions.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_bsp_compositions.csv"
]
morb_files = [
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_morb_compositions_f1200.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_morb_compositions_f1400.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_morb_compositions_f1600.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_morb_compositions_f1200.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_morb_compositions_f1400.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_morb_compositions_f1600.csv"
]
starting_masses_files = [
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_morb_starting_masses_f1200.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_morb_starting_masses_f1400.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_morb_starting_masses_f1600.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_morb_starting_masses_f1200.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_morb_starting_masses_f1400.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_morb_starting_masses_f1600.csv"
]
outfile_names = [
"adibekyan_f1200_depleted_lithosphere_oxides.csv",
"adibekyan_f1400_depleted_lithosphere_oxides.csv",
"adibekyan_f1500_depleted_lithosphere_oxides.csv",
"kepler_f1200_depleted_lithosphere_oxides.csv",
"kepler_f1400_depleted_lithosphere_oxides.csv",
"kepler_f1500_depleted_lithosphere_oxides.csv",
]
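# Added summary of the mass balance performed below (not in the original
# script): for each star, the MORB oxide fractions are first rescaled to sum
# to one (including the hard-coded MgO/0.646 adjustment), each oxide mass in
# the depleted lithosphere is then taken as (BSP fraction * BSP starting
# mass) - (MORB fraction * extracted MORB mass), and the differences are
# renormalized to weight percent before being written out.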
for index, i in enumerate(morb_files):
outfile_name = outfile_names[index]
if outfile_name in os.listdir(os.getcwd()):
os.remove(outfile_name)
outfile = open(outfile_name, "a")
header = ["Star", "FeO", "Na2O", "MgO", "Al2O3", "SiO2", "CaO", "TiO2", "Cr2O3"]
header_line = ",".join(str(i) for i in header)
outfile.write(header_line + "\n")
bsp = None
df = pd.read_csv(i)
print(starting_masses_files[index])
starting_mass = pd.read_csv(starting_masses_files[index], index_col='star')
if "adibekyan" in i:
# print(i)
# print(bsp_files[0])
# print(starting_masses_files[index])
bsp = pd.read_csv(bsp_files[0], index_col='Star')
else:
# print(i)
# print(bsp_files[0])
# print(starting_masses_files[index])
bsp = pd.read_csv(bsp_files[1], index_col='Star')
for row in df.index:
star = df['Star'][row]
# print(star)
try:
bsp_mass = starting_mass['initial_mass'][star]
bsp_feo = bsp['FeO'][star] / 100.0
bsp_na2o = bsp['Na2O'][star] / 100.0
bsp_mgo = bsp['MgO'][star] / 100.0
bsp_al2o3 = bsp['Al2O3'][star] / 100.0
bsp_sio2 = bsp['SiO2'][star] / 100.0
bsp_cao = bsp['CaO'][star] / 100.0
bsp_tio2 = bsp['TiO2'][star] / 100.0
bsp_cr2o3 = bsp['Cr2O3'][star] / 100.0
morb_mass = df['Mass'][row]
morb_feo = df['FeO'][row] / 100.0
print(morb_feo)
morb_na2o = df['Na2O'][row] / 100.0
morb_mgo = (df['MgO'][row] / 100.0) / 0.646
morb_al2o3 = df['Al2O3'][row] / 100.0
morb_sio2 = df['SiO2'][row] / 100.0
morb_cao = df['CaO'][row] / 100.0
morb_tio2 = df['TiO2'][row] / 100.0
morb_cr2o3 = df['Cr2O3'][row] / 100.0
morb_oxide_sum = (morb_feo + morb_na2o + morb_mgo + morb_al2o3 + morb_sio2 + morb_cao + morb_tio2 + morb_cr2o3)
morb_feo /= morb_oxide_sum
print(morb_feo)
morb_na2o /= morb_oxide_sum
morb_mgo /= morb_oxide_sum
morb_al2o3 /= morb_oxide_sum
morb_sio2 /= morb_oxide_sum
morb_cao /= morb_oxide_sum
morb_tio2 /= morb_oxide_sum
morb_cr2o3 /= morb_oxide_sum
morb_oxide_sum2 = (morb_feo + morb_na2o + morb_mgo + morb_al2o3 + morb_sio2 + morb_cao + morb_tio2 + morb_cr2o3)
morb_feo_mass = morb_feo * morb_mass
morb_na2o_mass = morb_na2o * morb_mass
morb_mgo_mass = morb_mgo * morb_mass
morb_al2o3_mass = morb_al2o3 * morb_mass
morb_sio2_mass = morb_sio2 * morb_mass
morb_cao_mass = morb_cao * morb_mass
morb_tio2_mass = morb_tio2 * morb_mass
morb_cr2o3_mass = morb_cr2o3 * morb_mass
bsp_feo_mass = bsp_feo * bsp_mass
bsp_na2o_mass = bsp_na2o * bsp_mass
bsp_mgo_mass = bsp_mgo * bsp_mass
bsp_al2o3_mass = bsp_al2o3 * bsp_mass
bsp_sio2_mass = bsp_sio2 * bsp_mass
bsp_cao_mass = bsp_cao * bsp_mass
bsp_tio2_mass = bsp_tio2 * bsp_mass
bsp_cr2o3_mass = bsp_cr2o3 * bsp_mass
depleted_feo = bsp_feo_mass - morb_feo_mass
depleted_na2o = bsp_na2o_mass - morb_na2o_mass
depleted_mgo = bsp_mgo_mass - morb_mgo_mass
depleted_al2o3 = bsp_al2o3_mass - morb_al2o3_mass
depleted_sio2 = bsp_sio2_mass - morb_sio2_mass
depleted_cao = bsp_cao_mass - morb_cao_mass
depleted_tio2 = bsp_tio2_mass - morb_tio2_mass
depleted_cr2o3 = bsp_cr2o3_mass - morb_cr2o3_mass
depleted_oxide_sum = (depleted_feo + depleted_na2o + depleted_mgo + depleted_al2o3 + depleted_sio2 +
depleted_cao + depleted_tio2 + depleted_cr2o3)
depleted_feo = depleted_feo / depleted_oxide_sum * 100.0
depleted_na2o = depleted_na2o / depleted_oxide_sum * 100.0
depleted_mgo = depleted_mgo / depleted_oxide_sum * 100.0
depleted_al2o3 = depleted_al2o3 / depleted_oxide_sum * 100.0
depleted_sio2 = depleted_sio2 / depleted_oxide_sum * 100.0
depleted_cao = depleted_cao / depleted_oxide_sum * 100.0
depleted_tio2 = depleted_tio2 / depleted_oxide_sum * 100.0
depleted_cr2o3 = depleted_cr2o3 / depleted_oxide_sum * 100.0
line = ",".join(str(j) for j in [
star, depleted_feo, depleted_na2o, depleted_mgo, depleted_al2o3, depleted_sio2, depleted_cao, depleted_tio2,
depleted_cr2o3
])
outfile.write(line + "\n")
except:
outfile.write(star + "\n")
outfile.close()
| cc0-1.0 |
chvogl/tardis | tardis/conftest.py | 5 | 4159 | from astropy.tests.pytest_plugins import *
def pytest_addoption(parser):
parser.addoption("--remote-data", action="store_true",
help="run tests with online data")
parser.addoption("--open-files", action="store_true",
help="fail if any test leaves files open")
parser.addoption("--doctest-plus", action="store_true",
help="enable running doctests with additional "
"features not found in the normal doctest "
"plugin")
parser.addoption("--doctest-rst", action="store_true",
help="enable running doctests in .rst documentation")
parser.addini("doctest_plus", "enable running doctests with additional "
"features not found in the normal doctest plugin")
parser.addini("doctest_norecursedirs",
"like the norecursedirs option but applies only to doctest "
"collection", type="args", default=())
parser.addini("doctest_rst",
"Run the doctests in the rst documentation",
default=False)
parser.addoption('--repeat', action='store',
help='Number of times to repeat each test')
parser.addoption("--atomic-dataset", dest='atomic-dataset', default=None,
help="filename for atomic dataset")
def pytest_report_header(config):
stdoutencoding = getattr(sys.stdout, 'encoding') or 'ascii'
s = "\n"
if six.PY2:
args = [x.decode('utf-8') for x in config.args]
elif six.PY3:
args = config.args
s += "Running tests in {0}.\n\n".format(" ".join(args))
from platform import platform
plat = platform()
if isinstance(plat, bytes):
plat = plat.decode(stdoutencoding, 'replace')
s += "Platform: {0}\n\n".format(plat)
s += "Executable: {0}\n\n".format(sys.executable)
s += "Full Python Version: \n{0}\n\n".format(sys.version)
s += "encodings: sys: {0}, locale: {1}, filesystem: {2}".format(
sys.getdefaultencoding(),
locale.getpreferredencoding(),
sys.getfilesystemencoding())
if sys.version_info < (3, 3, 0):
s += ", unicode bits: {0}".format(
int(math.log(sys.maxunicode, 2)))
s += '\n'
s += "byteorder: {0}\n".format(sys.byteorder)
s += "float info: dig: {0.dig}, mant_dig: {0.dig}\n\n".format(
sys.float_info)
import numpy
s += "numpy: {0}\n".format(numpy.__version__)
try:
import scipy
s += "scipy: {0}\n".format(scipy.__version__)
except:
s += "scipy: not available\n"
try:
import pandas
s += "pandas: {0}\n".format(pandas.__version__)
except:
s += "pandas: not available\n"
try:
import astropy
except:
s += "astropy: not available\n"
else:
s += "astropy: {0}\n".format(astropy.__version__)
try:
import yaml
except:
s += "yaml: not available\n"
else:
s += "yaml: {0}\n".format(yaml.__version__)
try:
import cython
except:
s += "cython: not available\n"
else:
s += "cython: {0}\n".format(cython.__version__)
try:
import h5py.version
s += "h5py: {0}\n".format(h5py.version.version)
except:
s += "h5py: not available\n"
try:
import matplotlib
s += "matplotlib: {0}\n".format(matplotlib.__version__)
except:
s += "matplotlib: not available\n"
try:
import IPython
except:
s += "ipython: not available\n"
else:
s += "ipython: {0}\n".format(IPython.__version__)
special_opts = ["remote_data", "pep8"]
opts = []
for op in special_opts:
if getattr(config.option, op, None):
opts.append(op)
if opts:
s += "Using Astropy options: {0}.\n".format(" ".join(opts))
if six.PY3 and (config.getini('doctest_rst') or config.option.doctest_rst):
s += "Running doctests in .rst files is not supported on Python 3.x\n"
if not six.PY3:
s = s.encode(stdoutencoding, 'replace')
return s | bsd-3-clause |
xuewenfei/anki | oldanki/graphs.py | 20 | 14438 | # -*- coding: utf-8 -*-
# Copyright: Damien Elmes <[email protected]>
# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html
"""\
Graphs of deck statistics
==============================
"""
__docformat__ = 'restructuredtext'
import os, sys, time
import oldanki.stats
from oldanki.lang import _
import datetime
#colours for graphs
dueYoungC = "#ffb380"
dueMatureC = "#ff5555"
dueCumulC = "#ff8080"
reviewNewC = "#80ccff"
reviewYoungC = "#3377ff"
reviewMatureC = "#0000ff"
reviewTimeC = "#0fcaff"
easesNewC = "#80b3ff"
easesYoungC = "#5555ff"
easesMatureC = "#0f5aff"
addedC = "#b3ff80"
firstC = "#b380ff"
intervC = "#80e5ff"
# support frozen distribs
if sys.platform.startswith("darwin"):
try:
del os.environ['MATPLOTLIBDATA']
except:
pass
try:
from matplotlib.figure import Figure
except UnicodeEncodeError:
# haven't tracked down the cause of this yet, but reloading fixes it
try:
from matplotlib.figure import Figure
except ImportError:
pass
except ImportError:
pass
def graphsAvailable():
return 'matplotlib' in sys.modules
class DeckGraphs(object):
def __init__(self, deck, width=8, height=3, dpi=75, selective=True):
self.deck = deck
self.stats = None
self.width = width
self.height = height
self.dpi = dpi
self.selective = selective
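# Usage sketch (added note): DeckGraphs(deck).nextDue(days=30) returns a
# matplotlib Figure; the caller is responsible for embedding or saving it.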
def calcStats (self):
if not self.stats:
days = {}
daysYoung = {}
daysMature = {}
months = {}
next = {}
lowestInDay = 0
self.endOfDay = self.deck.failedCutoff
t = time.time()
young = """
select interval, combinedDue from cards c
where relativeDelay between 0 and 1 and type >= 0 and interval <= 21"""
mature = """
select interval, combinedDue
from cards c where relativeDelay = 1 and type >= 0 and interval > 21"""
if self.selective:
young = self.deck._cardLimit("revActive", "revInactive",
young)
mature = self.deck._cardLimit("revActive", "revInactive",
mature)
young = self.deck.s.all(young)
mature = self.deck.s.all(mature)
for (src, dest) in [(young, daysYoung),
(mature, daysMature)]:
for (interval, due) in src:
day=int(round(interval))
days[day] = days.get(day, 0) + 1
indays = int(((due - self.endOfDay) / 86400.0) + 1)
next[indays] = next.get(indays, 0) + 1 # type-agnostic stats
dest[indays] = dest.get(indays, 0) + 1 # type-specific stats
if indays < lowestInDay:
lowestInDay = indays
self.stats = {}
self.stats['next'] = next
self.stats['days'] = days
self.stats['daysByType'] = {'young': daysYoung,
'mature': daysMature}
self.stats['months'] = months
self.stats['lowestInDay'] = lowestInDay
dayReps = self.deck.s.all("""
select day,
matureEase0+matureEase1+matureEase2+matureEase3+matureEase4 as matureReps,
reps-(newEase0+newEase1+newEase2+newEase3+newEase4) as combinedYoungReps,
reps as combinedNewReps
from stats
where type = 1""")
dayTimes = self.deck.s.all("""
select day, reviewTime as reviewTime
from stats
where type = 1""")
todaydt = self.deck._dailyStats.day
for dest, source in [("dayRepsNew", "combinedNewReps"),
("dayRepsYoung", "combinedYoungReps"),
("dayRepsMature", "matureReps")]:
self.stats[dest] = dict(
map(lambda dr: (-(todaydt -datetime.date(
*(int(x)for x in dr["day"].split("-")))).days, dr[source]), dayReps))
self.stats['dayTimes'] = dict(
map(lambda dr: (-(todaydt -datetime.date(
*(int(x)for x in dr["day"].split("-")))).days, dr["reviewTime"]/60.0), dayTimes))
def nextDue(self, days=30):
self.calcStats()
fig = Figure(figsize=(self.width, self.height), dpi=self.dpi)
graph = fig.add_subplot(111)
dayslists = [self.stats['next'], self.stats['daysByType']['mature']]
for dayslist in dayslists:
self.addMissing(dayslist, self.stats['lowestInDay'], days)
argl = []
for dayslist in dayslists:
dl = [x for x in dayslist.items() if x[0] <= days]
argl.extend(list(self.unzip(dl)))
self.varGraph(graph, days, [dueYoungC, dueMatureC], *argl)
cheat = fig.add_subplot(111)
b1 = cheat.bar(0, 0, color = dueYoungC)
b2 = cheat.bar(1, 0, color = dueMatureC)
cheat.legend([b1, b2], [
"Young",
"Mature"], loc='upper right')
graph.set_xlim(xmin=self.stats['lowestInDay'], xmax=days+1)
graph.set_xlabel("Day (0 = today)")
graph.set_ylabel("Cards Due")
return fig
def workDone(self, days=30):
self.calcStats()
for type in ["dayRepsNew", "dayRepsYoung", "dayRepsMature"]:
self.addMissing(self.stats[type], -days, 0)
fig = Figure(figsize=(self.width, self.height), dpi=self.dpi)
graph = fig.add_subplot(111)
args = sum((self.unzip(self.stats[type].items(), limit=days, reverseLimit=True) for type in ["dayRepsMature", "dayRepsYoung", "dayRepsNew"][::-1]), [])
self.varGraph(graph, days, [reviewNewC, reviewYoungC, reviewMatureC], *args)
cheat = fig.add_subplot(111)
b1 = cheat.bar(-3, 0, color = reviewNewC)
b2 = cheat.bar(-4, 0, color = reviewYoungC)
b3 = cheat.bar(-5, 0, color = reviewMatureC)
cheat.legend([b1, b2, b3], [
"New",
"Young",
"Mature"], loc='upper left')
graph.set_xlim(xmin=-days+1, xmax=1)
graph.set_ylim(ymax=max(max(a for a in args[1::2])) + 10)
graph.set_xlabel("Day (0 = today)")
graph.set_ylabel("Cards Answered")
return fig
def timeSpent(self, days=30):
self.calcStats()
fig = Figure(figsize=(self.width, self.height), dpi=self.dpi)
times = self.stats['dayTimes']
self.addMissing(times, -days+1, 0)
times = self.unzip([(day,y) for (day,y) in times.items()
if day + days >= 0])
graph = fig.add_subplot(111)
self.varGraph(graph, days, reviewTimeC, *times)
graph.set_xlim(xmin=-days+1, xmax=1)
graph.set_ylim(ymax=max(a for a in times[1]) + 0.1)
graph.set_xlabel("Day (0 = today)")
graph.set_ylabel("Minutes")
return fig
def cumulativeDue(self, days=30):
self.calcStats()
fig = Figure(figsize=(self.width, self.height), dpi=self.dpi)
graph = fig.add_subplot(111)
self.addMissing(self.stats['next'], 0, days-1)
dl = [x for x in self.stats['next'].items() if x[0] <= days]
(x, y) = self.unzip(dl)
count=0
y = list(y)
for i in range(len(x)):
count = count + y[i]
if i == 0:
continue
y[i] = count
if x[i] > days:
break
self._filledGraph(graph, days, dueCumulC, 1, x, y)
graph.set_xlim(xmin=self.stats['lowestInDay'], xmax=days-1)
graph.set_ylim(ymax=graph.get_ylim()[1]+10)
graph.set_xlabel("Day (0 = today)")
graph.set_ylabel("Cards Due")
return fig
def intervalPeriod(self, days=30):
self.calcStats()
fig = Figure(figsize=(self.width, self.height), dpi=self.dpi)
ints = self.stats['days']
self.addMissing(ints, 0, days)
intervals = self.unzip(ints.items(), limit=days)
graph = fig.add_subplot(111)
self.varGraph(graph, days, intervC, *intervals)
graph.set_xlim(xmin=0, xmax=days+1)
graph.set_xlabel("Card Interval")
graph.set_ylabel("Number of Cards")
return fig
def addedRecently(self, numdays=30, attr='created'):
self.calcStats()
days = {}
fig = Figure(figsize=(self.width, self.height), dpi=self.dpi)
limit = self.endOfDay - (numdays) * 86400
res = self.deck.s.column0("select %s from cards where %s >= %f" %
(attr, attr, limit))
for r in res:
d = int((r - self.endOfDay) / 86400.0)
days[d] = days.get(d, 0) + 1
self.addMissing(days, -numdays+1, 0)
graph = fig.add_subplot(111)
intervals = self.unzip(days.items())
if attr == 'created':
colour = addedC
else:
colour = firstC
self.varGraph(graph, numdays, colour, *intervals)
graph.set_xlim(xmin=-numdays+1, xmax=1)
graph.set_xlabel("Day (0 = today)")
if attr == 'created':
graph.set_ylabel("Cards Added")
else:
graph.set_ylabel("Cards First Answered")
return fig
def addMissing(self, dic, min, max):
for i in range(min, max+1):
if not i in dic:
dic[i] = 0
def unzip(self, tuples, fillFix=True, limit=None, reverseLimit=False):
tuples.sort(cmp=lambda x,y: cmp(x[0], y[0]))
if limit:
if reverseLimit:
tuples = tuples[-limit:]
else:
tuples = tuples[:limit+1]
new = zip(*tuples)
return new
def varGraph(self, graph, days, colours=["b"], *args):
if len(args[0]) < 120:
return self.barGraph(graph, days, colours, *args)
else:
return self.filledGraph(graph, days, colours, *args)
def filledGraph(self, graph, days, colours=["b"], *args):
self._filledGraph(graph, days, colours, 0, *args)
def _filledGraph(self, graph, days, colours, lw, *args):
if isinstance(colours, str):
colours = [colours]
for triplet in [(args[n], args[n + 1], colours[n / 2]) for n in range(0, len(args), 2)]:
x = list(triplet[0])
y = list(triplet[1])
c = triplet[2]
lowest = 99999
highest = -lowest
for i in range(len(x)):
if x[i] < lowest:
lowest = x[i]
if x[i] > highest:
highest = x[i]
# ensure the filled area reaches the bottom
x.insert(0, lowest - 1)
y.insert(0, 0)
x.append(highest + 1)
y.append(0)
# plot
graph.fill(x, y, c, lw=lw)
graph.grid(True)
graph.set_ylim(ymin=0, ymax=max(2, graph.get_ylim()[1]))
def barGraph(self, graph, days, colours, *args):
if isinstance(colours, str):
colours = [colours]
lim = None
for triplet in [(args[n], args[n + 1], colours[n / 2]) for n in range(0, len(args), 2)]:
x = list(triplet[0])
y = list(triplet[1])
c = triplet[2]
lw = 0
if lim is None:
lim = (x[0], x[-1])
length = (lim[1] - lim[0])
if len(args) > 4:
if length <= 30:
lw = 1
else:
if length <= 90:
lw = 1
lowest = 99999
highest = -lowest
for i in range(len(x)):
if x[i] < lowest:
lowest = x[i]
if x[i] > highest:
highest = x[i]
graph.bar(x, y, color=c, width=1, linewidth=lw)
graph.grid(True)
graph.set_ylim(ymin=0, ymax=max(2, graph.get_ylim()[1]))
import numpy as np
if length > 10:
step = length / 10.0
# python's range() won't accept float step args, so we do it manually
if lim[0] < 0:
ticks = [int(lim[1] - step * x) for x in range(10)]
else:
ticks = [int(lim[0] + step * x) for x in range(10)]
else:
ticks = list(xrange(lim[0], lim[1]+1))
graph.set_xticks(np.array(ticks) + 0.5)
graph.set_xticklabels([str(int(x)) for x in ticks])
for tick in graph.xaxis.get_major_ticks():
tick.tick1On = False
tick.tick2On = False
def easeBars(self):
fig = Figure(figsize=(3, 3), dpi=self.dpi)
graph = fig.add_subplot(111)
types = ("new", "young", "mature")
enum = 5
offset = 0
arrsize = 16
arr = [0] * arrsize
n = 0
colours = [easesNewC, easesYoungC, easesMatureC]
bars = []
gs = oldanki.stats.globalStats(self.deck)
for type in types:
total = (getattr(gs, type + "Ease0") +
getattr(gs, type + "Ease1") +
getattr(gs, type + "Ease2") +
getattr(gs, type + "Ease3") +
getattr(gs, type + "Ease4"))
setattr(gs, type + "Ease1", getattr(gs, type + "Ease0") +
getattr(gs, type + "Ease1"))
setattr(gs, type + "Ease0", -1)
for e in range(1, enum):
try:
arr[e+offset] = (getattr(gs, type + "Ease%d" % e)
/ float(total)) * 100 + 1
except ZeroDivisionError:
arr[e+offset] = 0
bars.append(graph.bar(range(arrsize), arr, width=1.0,
color=colours[n], align='center'))
arr = [0] * arrsize
offset += 5
n += 1
x = ([""] + [str(n) for n in range(1, enum)]) * 3
graph.legend([p[0] for p in bars], ("New",
"Young",
"Mature"),
'upper left')
graph.set_ylim(ymax=100)
graph.set_xlim(xmax=15)
graph.set_xticks(range(arrsize))
graph.set_xticklabels(x)
graph.set_ylabel("% of Answers")
graph.set_xlabel("Answer Buttons")
graph.grid(True)
return fig
| agpl-3.0 |
henridwyer/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
AlexanderFabisch/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
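# staged_predict yields the ensemble's prediction after each boosting
# iteration, which lets us track test error as a function of the number of
# trees without refitting.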
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
bourbakilee/motion-planning | CollisionDetection/CollisionDetection/backup.py | 1 | 3741 | # 2015.09.20, LI Yunsheng
import numpy as np
from scipy.fftpack import fft2, ifft2
import matplotlib.pyplot as plt
class Grid():
# M - column, N - row must be even
def __init__(self,dM,dN,data):
self.M = data.shape[1]
self.N = data.shape[0]
self.dM = dM
self.dN = dN
self.data = data
@property
def width(self):
return self.M*self.dM
@property
def height(self):
return self.N*self.dN
class Circle():
def __init__(self, r):
#self.x = 0
#self.y = 0
self.r = r
def mesh(self, grid_map):
# return NXM array
R = np.zeros((grid_map.N, grid_map.M))
j = np.floor(-self.r/grid_map.dN + 0.5)
y = (j+0.5)*grid_map.dN
while j < 0:
x = np.sqrt(self.r**2 - y**2)
i = np.floor(-x/grid_map.dM + 0.5)
i_list = np.linspace(i, -i, -2*i+1)
#i_list = np.where(i_list<0, i_list+grid_map.M, i_list)
#jj=j+grid_map.N
for ii in i_list:
R[ii,j] = 1
j += 1
y += grid_map.dN
j = 0
y = -grid_map.dN/2
while y < self.r:
x = np.sqrt(self.r**2 - y**2)
i = np.floor(-x/grid_map.dM + 0.5)
i_list = np.linspace(i, -i, -2*i+1)
#i_list = np.where(i_list<0, i_list+grid_map.N, i_list)
for ii in i_list:
R[ii,j] = 1
j += 1
y += grid_map.dN
return R
def moveto(self,disk_mesh,x,y):
# disk_mesh - NXM array
# x,y > 0
R = np.zeros(disk_mesh.shape)
for i in range(R.shape[0]):
for j in range(R.shape[1]):
R[i,j] = disk_mesh[i-x, j-y]
return R
class Veh_Cfg():
# (x,y)-center of rear axis
def __init__(self, x,y,t,l1,l2,w):
self.x = x
self.y = y
self.t = t
self.l1 = l1
self.l2 = l2
self.length = l1+l2
self.width = w
self.r = np.sqrt(self.length**2/9 + self.width**2/4) # radius of circles, which cover the vehicle
self.d = 2*self.length/3
def centers_of_circles(self,grid_map):
c = np.zeros((3,2))
direction = np.array([np.sin(self.t),np.cos(self.t)])
c[1] = np.array([self.x,self.y]) + (self.l1 - self.l2)/2 * direction
c[0] = c[1] - self.d * direction
c[2] = c[1] + self.d * direction
return np.floor(c/np.array([[grid_map.dM, grid_map.dN]]))
def cost(self, centers, costmap):
return np.max([costmap[tuple(c)] for c in centers])
N=1000 # map size
delta =0.1 # incremental distance
eps = 0.1 # numerical err
obstacles = np.zeros((N,N)) # obstacles
obstacles[0, :] = 1
obstacles[N-1, :] = 1
obstacles[:, 0] = 1
obstacles[:, N-1] = 1
obstacles[400:600, 400:600] = 1
workspace = Grid(delta,delta,obstacles)
veh = Veh_Cfg(25,25,np.pi/4,4,1,2)
disk = Circle(veh.r)
disk_mesh = disk.mesh(workspace)
centers = veh.centers_of_circles(workspace)
veh_mesh = np.zeros((N,N))
veh_mesh += disk.moveto(disk_mesh,centers[0,0],centers[0,1])
veh_mesh += disk.moveto(disk_mesh,centers[1,0],centers[1,1])
veh_mesh += disk.moveto(disk_mesh,centers[2,0],centers[2,1])
veh_mesh = np.where(veh_mesh>0, 0.8, 0)
# Emacs is still easier to use, heh
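# Added note: by the convolution theorem, the pointwise product of the two
# FFTs below is the FFT of the circular convolution of the obstacle map with
# the disk mesh, so after ifft2 any cell with a value above eps lies within
# one disk radius of an obstacle, i.e. a circle centered there would collide.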
Obstacles = fft2(obstacles)
Robot = fft2(disk_mesh)
CostMap = Obstacles * Robot
costmap = np.real(ifft2(CostMap))
costmap = np.where(costmap > eps, 0.5, 0) + obstacles + veh_mesh
costmap = np.where(costmap > 1, 1, costmap)
cost = veh.cost(centers,costmap)
print(cost)
x=np.linspace(1,99,990)
y=20+10*np.sin(x)
plt.imshow(costmap, cmap=plt.cm.gray_r, origin="lower", extent=(0,100,0,100))
plt.plot(x,y)
# plt.rc('figure', figsize=(1000,1000))
plt.show()
| gpl-3.0 |
MalkIPP/openfisca-france-data | openfisca_france_data/zone_apl_data/zone_apl/zone_apl_imputation_data_reader.py | 4 | 2770 | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import pickle
from pandas import read_csv, DataFrame
import numpy as np
if __name__ == '__main__':
with open('zone_apl_2006.csv') as zone_csv:
Z = read_csv(zone_csv, delimiter = ";")
#% PSDC99 population sans double compte 99
#% Pop_mun_2006 population municipale 2006
#% M.POL99 de 1 à 4
#% REG de 11 à 94
#% TAU99 0 à 10
#% TU99 0 à 8
#% zone
#
grouped_5 = Z.groupby(['TU99','TAU99','REG','POL99','Zone'], as_index=False)
pop = grouped_5['Pop_mun_2006'].aggregate(np.sum)
# prepare result matrix by building empty result matrix
res = pop.copy()
res.pop('Zone')
res.pop('Pop_mun_2006')
res['zone1'] = 0
res['zone2'] = 0
res['zone3'] = 0
print res
print pop.Pop_mun_2006[pop['Zone']==1]
res['zone1'] = res['zone1'] + pop.Pop_mun_2006[pop['Zone']==1]
res['zone2'] = res['zone2'] + pop.Pop_mun_2006[pop['Zone']==2]
res['zone3'] = res['zone3'] + pop.Pop_mun_2006[pop['Zone']==3]
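# Added note: each zoneN column now holds the row's population where
# Zone == N and NaN elsewhere (pandas index alignment); the loop below
# resets those NaNs to zero before the groupby aggregation.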
print res.to_string()
for col in ('zone1','zone2','zone3'):
res[col][np.isnan(res[col])] = 0
print res.to_string()
res2 = res.groupby(['TU99','TAU99', 'REG','POL99'])
final = res2.agg({'zone1': np.sum,
'zone2': np.sum,
'zone3': np.sum})
final['total'] = final['zone1'] + final['zone2'] + final['zone3']
final['proba_zone1'] = final['zone1']/final['total']
final['proba_zone2'] = final['zone2']/final['total']
final['proba_zone3'] = final['zone3']/final['total']
final.pop('zone1')
final.pop('zone2')
final.pop('zone3')
final.pop('total')
final = final.reset_index()
print final
# Sanity check
# s = final['proba_zone1'] + final['proba_zone2'] + final['proba_zone3']
final.to_csv('./zone_apl_imputation_data.csv')
| agpl-3.0 |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_wxagg.py | 70 | 9051 | from __future__ import division
"""
backend_wxagg.py
A wxPython backend for Agg. This uses the GUI widgets written by
Jeremy O'Donoghue ([email protected]) and the Agg backend by John
Hunter ([email protected])
Copyright (C) 2003-5 Jeremy O'Donoghue, John Hunter, Illinois Institute of
Technology
License: This work is licensed under the matplotlib license( PSF
compatible). A copy should be included with this source code.
"""
import wx
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
import backend_wx
from backend_wx import FigureManager, FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.BeginDrawing()
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.EndDrawing()
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
        # artist contains methods that rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
frame = FigureFrameWxAgg(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
#
# agg/wxPython image conversion functions (wxPython <= 2.6)
#
def _py_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
if bbox is None:
# agg => rgb -> image
return image
else:
# agg => rgb -> image => bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_clipped_image_as_bitmap(image, bbox))
def _py_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image => bitmap
return wx.BitmapFromImage(_py_convert_agg_to_wx_image(agg, None))
else:
# agg => rgb -> image => bitmap => clipped bitmap
return _clipped_image_as_bitmap(
_py_convert_agg_to_wx_image(agg, None),
bbox)
def _clipped_image_as_bitmap(image, bbox):
"""
Convert the region of a wx.Image bounded by bbox to a wx.Bitmap.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromImage(image)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(image.GetHeight() - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _py_WX28_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _py_WX28_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
def _use_accelerator(state):
"""
Enable or disable the WXAgg accelerator, if it is present and is also
compatible with whatever version of wxPython is in use.
"""
global _convert_agg_to_wx_image
global _convert_agg_to_wx_bitmap
if getattr(wx, '__version__', '0.0')[0:3] < '2.8':
# wxPython < 2.8, so use the C++ accelerator or the Python routines
if state and _wxagg is not None:
_convert_agg_to_wx_image = _wxagg.convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _wxagg.convert_agg_to_wx_bitmap
else:
_convert_agg_to_wx_image = _py_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_convert_agg_to_wx_bitmap
else:
# wxPython >= 2.8, so use the accelerated Python routines
_convert_agg_to_wx_image = _py_WX28_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_WX28_convert_agg_to_wx_bitmap
# try to load the WXAgg accelerator
try:
import _wxagg
except ImportError:
_wxagg = None
# if it's present, use it
_use_accelerator(True)
| agpl-3.0 |
timqian/sms-tools | lectures/3-Fourier-properties/plots-code/symmetry.py | 26 | 1178 | import matplotlib.pyplot as plt
import numpy as np
import sys
from scipy.fftpack import fft, ifft, fftshift
import math
sys.path.append('../../../software/models/')
import utilFunctions as UF
import dftModel as DF
(fs, x) = UF.wavread('../../../sounds/soprano-E4.wav')
w = np.hamming(511)
N = 512
pin = 5000
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
fftbuffer = np.zeros(N)
x1 = x[pin-hM1:pin+hM2]
xw = x1*w
fftbuffer[:hM1] = xw[hM2:]
fftbuffer[N-hM2:] = xw[:hM2]
X = fftshift(fft(fftbuffer))
mX = 20 * np.log10(abs(X))
pX = np.unwrap(np.angle(X))
plt.figure(1, figsize=(9.5, 7))
plt.subplot(311)
plt.plot(np.arange(-hM1, hM2), x1, lw=1.5)
plt.axis([-hM1, hM2, min(x1), max(x1)])
plt.ylabel('amplitude')
plt.title('x (soprano-E4.wav)')
plt.subplot(3,1,2)
plt.plot(np.arange(-N/2,N/2), mX, 'r', lw=1.5)
plt.axis([-N/2,N/2,-48,max(mX)])
plt.title ('mX = 20*log10(abs(X))')
plt.ylabel('amplitude (dB)')
plt.subplot(3,1,3)
plt.plot(np.arange(-N/2,N/2), pX, 'c', lw=1.5)
plt.axis([-N/2,N/2,min(pX),max(pX)])
plt.title ('pX = unwrap(angle(X))')
plt.ylabel('phase (radians)')
plt.tight_layout()
plt.savefig('symmetry.png')
plt.show()
| agpl-3.0 |
ishank08/scikit-learn | doc/sphinxext/sphinx_gallery/gen_rst.py | 23 | 20990 | # -*- coding: utf-8 -*-
# Author: Óscar Nájera
# License: 3-clause BSD
"""
==================
RST file generator
==================
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
# Don't use unicode_literals here (be explicit with u"..." instead) otherwise
# tricky errors come up with exec(code_blocks, ...) calls
from __future__ import division, print_function, absolute_import
from time import time
import codecs
import hashlib
import os
import re
import shutil
import subprocess
import sys
import traceback
import warnings
# Try Python 2 first, otherwise load from Python 3
try:
# textwrap indent only exists in python 3
from textwrap import indent
except ImportError:
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
from io import StringIO
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('agg')
matplotlib_backend = matplotlib.get_backend()
if matplotlib_backend != 'agg':
mpl_backend_msg = (
"Sphinx-Gallery relies on the matplotlib 'agg' backend to "
"render figures and write them to files. You are "
"currently using the {} backend. Sphinx-Gallery will "
"terminate the build now, because changing backends is "
"not well supported by matplotlib. We advise you to move "
"sphinx_gallery imports before any matplotlib-dependent "
"import. Moving sphinx_gallery imports at the top of "
"your conf.py file should fix this issue")
raise ValueError(mpl_backend_msg.format(matplotlib_backend))
import matplotlib.pyplot as plt
except ImportError:
# this script can be imported by nosetest to find tests to run: we should
# not impose the matplotlib requirement in that case.
pass
from . import glr_path_static
from .backreferences import write_backreferences, _thumbnail_div
from .downloads import CODE_DOWNLOAD
from .py_source_parser import (get_docstring_and_rest,
split_code_and_text_blocks)
from .notebook import jupyter_notebook, text2string, save_notebook
try:
basestring
except NameError:
basestring = str
unicode = str
###############################################################################
class Tee(object):
"""A tee object to redirect streams to multiple outputs"""
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
# When called from a local terminal seaborn needs it in Python3
def isatty(self):
        return self.file1.isatty()
class MixedEncodingStringIO(StringIO):
"""Helper when both ASCII and unicode strings will be written"""
def write(self, data):
if not isinstance(data, unicode):
data = data.decode('utf-8')
StringIO.write(self, data)
###############################################################################
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: sphx-glr-horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: /%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: /%s
:align: center
"""
# This one could contain unicode
CODE_OUTPUT = u""".. rst-class:: sphx-glr-script-out
Out::
{0}\n"""
SPHX_GLR_SIG = """\n.. rst-class:: sphx-glr-signature
`Generated by Sphinx-Gallery <http://sphinx-gallery.readthedocs.io>`_\n"""
def codestr2rst(codestr, lang='python'):
"""Return reStructuredText code block from code string"""
code_directive = "\n.. code-block:: {0}\n\n".format(lang)
indented_block = indent(codestr, ' ' * 4)
return code_directive + indented_block
def extract_thumbnail_number(text):
""" Pull out the thumbnail image number specified in the docstring. """
# check whether the user has specified a specific thumbnail image
pattr = re.compile(
r"^\s*#\s*sphinx_gallery_thumbnail_number\s*=\s*([0-9]+)\s*$",
flags=re.MULTILINE)
match = pattr.search(text)
if match is None:
# by default, use the first figure created
thumbnail_number = 1
else:
thumbnail_number = int(match.groups()[0])
return thumbnail_number
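# Illustrative note (added): an example script containing the magic comment
#     # sphinx_gallery_thumbnail_number = 2
# makes extract_thumbnail_number() return 2, so its second figure becomes the
# gallery thumbnail; without the comment the first figure is used.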
def extract_intro(filename):
""" Extract the first paragraph of module-level docstring. max:95 char"""
docstring, _ = get_docstring_and_rest(filename)
# lstrip is just in case docstring has a '\n\n' at the beginning
paragraphs = docstring.lstrip().split('\n\n')
if len(paragraphs) > 1:
first_paragraph = re.sub('\n', ' ', paragraphs[1])
first_paragraph = (first_paragraph[:95] + '...'
if len(first_paragraph) > 95 else first_paragraph)
else:
raise ValueError(
"Example docstring should have a header for the example title "
"and at least a paragraph explaining what the example is about. "
"Please check the example file:\n {}\n".format(filename))
return first_paragraph
def get_md5sum(src_file):
"""Returns md5sum of file"""
with open(src_file, 'rb') as src_data:
src_content = src_data.read()
src_md5 = hashlib.md5(src_content).hexdigest()
return src_md5
def md5sum_is_current(src_file):
"""Checks whether src_file has the same md5 hash as the one on disk"""
src_md5 = get_md5sum(src_file)
src_md5_file = src_file + '.md5'
if os.path.exists(src_md5_file):
with open(src_md5_file, 'r') as file_checksum:
ref_md5 = file_checksum.read()
return src_md5 == ref_md5
return False
def save_figures(image_path, fig_count, gallery_conf):
"""Save all open matplotlib figures of the example code-block
Parameters
----------
image_path : str
Path where plots are saved (format string which accepts figure number)
fig_count : int
Previous figure number count. Figure number add from this number
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
Returns
-------
figure_list : list of str
strings containing the full path to each figure
images_rst : str
rst code to embed the images in the document
"""
figure_list = []
fig_numbers = plt.get_fignums()
for fig_num in fig_numbers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
current_fig = image_path.format(fig_count + fig_num)
fig.savefig(current_fig, **kwargs)
figure_list.append(current_fig)
if gallery_conf.get('find_mayavi_figures', False):
from mayavi import mlab
e = mlab.get_engine()
last_matplotlib_fig_num = fig_count + len(figure_list)
total_fig_num = last_matplotlib_fig_num + len(e.scenes)
mayavi_fig_nums = range(last_matplotlib_fig_num + 1, total_fig_num + 1)
for scene, mayavi_fig_num in zip(e.scenes, mayavi_fig_nums):
current_fig = image_path.format(mayavi_fig_num)
mlab.savefig(current_fig, figure=scene)
# make sure the image is not too large
scale_image(current_fig, current_fig, 850, 999)
figure_list.append(current_fig)
mlab.close(all=True)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
images_rst = ""
if len(figure_list) == 1:
figure_name = figure_list[0]
images_rst = SINGLE_IMAGE % figure_name.lstrip('/')
elif len(figure_list) > 1:
images_rst = HLIST_HEADER
for figure_name in figure_list:
images_rst += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
return figure_list, images_rst
def scale_image(in_fname, out_fname, max_width, max_height):
"""Scales an image with the same aspect ratio centered in an
image with a given max_width and max_height
if in_fname == out_fname the image can only be scaled down
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = max_width / float(width_in)
scale_h = max_height / float(height_in)
if height_in * scale_w <= max_height:
scale = scale_w
else:
scale = scale_h
if scale >= 1.0 and in_fname == out_fname:
return
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (max_width, max_height), (255, 255, 255))
pos_insert = ((max_width - width_sc) // 2, (max_height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the \
generated images')
def save_thumbnail(image_path_template, src_file, gallery_conf):
"""Save the thumbnail image"""
# read specification of the figure to display as thumbnail from main text
_, content = get_docstring_and_rest(src_file)
thumbnail_number = extract_thumbnail_number(content)
thumbnail_image_path = image_path_template.format(thumbnail_number)
thumb_dir = os.path.join(os.path.dirname(thumbnail_image_path), 'thumb')
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
base_image_name = os.path.splitext(os.path.basename(src_file))[0]
thumb_file = os.path.join(thumb_dir,
'sphx_glr_%s_thumb.png' % base_image_name)
if src_file in gallery_conf['failing_examples']:
broken_img = os.path.join(glr_path_static(), 'broken_example.png')
scale_image(broken_img, thumb_file, 200, 140)
elif os.path.exists(thumbnail_image_path):
scale_image(thumbnail_image_path, thumb_file, 400, 280)
elif not os.path.exists(thumb_file):
# create something to replace the thumbnail
default_thumb_file = os.path.join(glr_path_static(), 'no_image.png')
default_thumb_file = gallery_conf.get("default_thumb_file",
default_thumb_file)
scale_image(default_thumb_file, thumb_file, 200, 140)
def generate_dir_rst(src_dir, target_dir, gallery_conf, seen_backrefs):
"""Generate the gallery reStructuredText for an example directory"""
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
print(80 * '_')
print('Example directory %s does not have a README.txt file' %
src_dir)
print('Skipping this directory')
print(80 * '_')
return "", [] # because string is an expected return type
fhindex = open(os.path.join(src_dir, 'README.txt')).read()
# Add empty lines to avoid bug in issue #165
fhindex += "\n\n"
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = [fname for fname in sorted(os.listdir(src_dir))
if fname.endswith('.py')]
entries_text = []
computation_times = []
for fname in sorted_listdir:
amount_of_code, time_elapsed = \
generate_file_rst(fname, target_dir, src_dir, gallery_conf)
computation_times.append((time_elapsed, fname))
new_fname = os.path.join(src_dir, fname)
intro = extract_intro(new_fname)
write_backreferences(seen_backrefs, gallery_conf,
target_dir, fname, intro)
this_entry = _thumbnail_div(target_dir, fname, intro) + """
.. toctree::
:hidden:
/%s/%s\n""" % (target_dir, fname[:-3])
entries_text.append((amount_of_code, this_entry))
# sort to have the smallest entries in the beginning
entries_text.sort()
for _, entry_text in entries_text:
fhindex += entry_text
# clear at the end of the section
fhindex += """.. raw:: html\n
<div style='clear:both'></div>\n\n"""
return fhindex, computation_times
def execute_code_block(code_block, example_globals,
block_vars, gallery_conf):
"""Executes the code block of the example file"""
time_elapsed = 0
stdout = ''
# If example is not suitable to run, skip executing its blocks
if not block_vars['execute_script']:
return stdout, time_elapsed
plt.close('all')
cwd = os.getcwd()
# Redirect output to stdout and
orig_stdout = sys.stdout
src_file = block_vars['src_file']
try:
# First cd in the original example dir, so that any file
# created by the example get created in this directory
os.chdir(os.path.dirname(src_file))
my_buffer = MixedEncodingStringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
t_start = time()
# don't use unicode_literals at the top of this file or you get
# nasty errors here on Py2.7
exec(code_block, example_globals)
time_elapsed = time() - t_start
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue().strip().expandtabs()
# raise RuntimeError
if my_stdout:
stdout = CODE_OUTPUT.format(indent(my_stdout, u' ' * 4))
os.chdir(cwd)
fig_list, images_rst = save_figures(
block_vars['image_path'], block_vars['fig_count'], gallery_conf)
fig_num = len(fig_list)
except Exception:
formatted_exception = traceback.format_exc()
fail_example_warning = 80 * '_' + '\n' + \
'%s failed to execute correctly:' % src_file + \
formatted_exception + 80 * '_' + '\n'
warnings.warn(fail_example_warning)
fig_num = 0
images_rst = codestr2rst(formatted_exception, lang='pytb')
# Breaks build on first example error
# XXX This check can break during testing e.g. if you uncomment the
# `raise RuntimeError` by the `my_stdout` call, maybe use `.get()`?
if gallery_conf['abort_on_example_error']:
raise
# Stores failing file
gallery_conf['failing_examples'][src_file] = formatted_exception
block_vars['execute_script'] = False
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
code_output = u"\n{0}\n\n{1}\n\n".format(images_rst, stdout)
block_vars['fig_count'] += fig_num
return code_output, time_elapsed
def clean_modules():
"""Remove "unload" seaborn from the name space
After a script is executed it can load a variety of setting that one
does not want to influence in other examples in the gallery."""
# Horrible code to 'unload' seaborn, so that it resets
# its default when is load
# Python does not support unloading of modules
# https://bugs.python.org/issue9072
for module in list(sys.modules.keys()):
if 'seaborn' in module:
del sys.modules[module]
# Reset Matplotlib to default
plt.rcdefaults()
def generate_file_rst(fname, target_dir, src_dir, gallery_conf):
"""Generate the rst file for a given example.
Returns
-------
amount_of_code : int
character count of the corresponding python script in file
time_elapsed : float
seconds required to run the script
"""
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
script_blocks = split_code_and_text_blocks(src_file)
amount_of_code = sum([len(bcontent)
for blabel, bcontent in script_blocks
if blabel == 'code'])
if md5sum_is_current(example_file):
return amount_of_code, 0
image_dir = os.path.join(target_dir, 'images')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
base_image_name = os.path.splitext(fname)[0]
image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png'
image_path_template = os.path.join(image_dir, image_fname)
ref_fname = example_file.replace(os.path.sep, '_')
example_rst = """\n\n.. _sphx_glr_{0}:\n\n""".format(ref_fname)
filename_pattern = gallery_conf.get('filename_pattern')
execute_script = re.search(filename_pattern, src_file) and gallery_conf[
'plot_gallery']
example_globals = {
        # A lot of examples contain 'print(__doc__)', for example in
# scikit-learn so that running the example prints some useful
# information. Because the docstring has been separated from
# the code blocks in sphinx-gallery, __doc__ is actually
# __builtin__.__doc__ in the execution context and we do not
# want to print it
'__doc__': '',
# Examples may contain if __name__ == '__main__' guards
        # for example in scikit-learn, if the example uses multiprocessing
'__name__': '__main__',
}
# A simple example has two blocks: one for the
# example introduction/explanation and one for the code
is_example_notebook_like = len(script_blocks) > 2
time_elapsed = 0
block_vars = {'execute_script': execute_script, 'fig_count': 0,
'image_path': image_path_template, 'src_file': src_file}
print('Executing file %s' % src_file)
for blabel, bcontent in script_blocks:
if blabel == 'code':
code_output, rtime = execute_code_block(bcontent,
example_globals,
block_vars,
gallery_conf)
time_elapsed += rtime
if is_example_notebook_like:
example_rst += codestr2rst(bcontent) + '\n'
example_rst += code_output
else:
example_rst += code_output
if 'sphx-glr-script-out' in code_output:
# Add some vertical space after output
example_rst += "\n\n|\n\n"
example_rst += codestr2rst(bcontent) + '\n'
else:
example_rst += text2string(bcontent) + '\n'
clean_modules()
    # Write the md5 checksum if the example has been built correctly, i.e. it did
    # not fail and was meant to run ('no-plot' runs shall not cache the md5sum)
if block_vars['execute_script']:
with open(example_file + '.md5', 'w') as file_checksum:
file_checksum.write(get_md5sum(example_file))
save_thumbnail(image_path_template, src_file, gallery_conf)
time_m, time_s = divmod(time_elapsed, 60)
example_nb = jupyter_notebook(script_blocks)
save_notebook(example_nb, example_file.replace('.py', '.ipynb'))
with codecs.open(os.path.join(target_dir, base_image_name + '.rst'),
mode='w', encoding='utf-8') as f:
example_rst += "**Total running time of the script:**" \
" ({0: .0f} minutes {1: .3f} seconds)\n\n".format(
time_m, time_s)
example_rst += CODE_DOWNLOAD.format(fname,
fname.replace('.py', '.ipynb'))
example_rst += SPHX_GLR_SIG
f.write(example_rst)
print("{0} ran in : {1:.2g} seconds\n".format(src_file, time_elapsed))
return amount_of_code, time_elapsed
| bsd-3-clause |
rubikloud/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
vsoch/repofish | analysis/methods/4.explore_keywords.py | 1 | 4497 | from repofish.utils import save_json
from glob import glob
import pickle
import pandas
import numpy
import json
import os
import re
home = os.environ["HOME"]
base = "%s/data/pubmed" %os.environ["LAB"]
method_folder = "%s/methods" %(base)
repo_folder = "%s/repos" %(base)
files = glob("%s/*.json" %repo_folder)
# KEYWORDS
urls = []
pmids = []
keywords = []
for f in files:
print "Adding %s to list" %(f)
result = json.load(open(f,'r'))
pubmed_paper = str(result["pmid"])
urls = urls + result["github"]
if "keywords" in result:
if not isinstance(result["keywords"],list):
kw = [result["keywords"]]
else:
kw = result["keywords"]
keywords = keywords + kw
keywords = numpy.unique(keywords).tolist()
# Function to parse keywords
def parse_keywords(kw):
if isinstance(kw,dict):
if "#text" in kw:
return kw["#text"]
elif "italic" in kw:
if "named-content" in kw["italic"]:
return kw["italic"]["named-content"]["named-content"]["#text"]
elif "#text" in kw["italic"]:
return kw["italic"]["#text"]
else:
return kw["italic"]
elif "named-content" in kw:
if isinstance(kw["named-content"]["named-content"],dict):
return kw["named-content"]["named-content"]["#text"]
return kw["named-content"]["named-content"][0]["#text"]
elif "styled-content" in kw:
return kw["styled-content"]["#text"]
return kw.lower().strip()
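# Illustrative inputs for parse_keywords (added sketch based on the branches
# above):
#   parse_keywords({"#text": "Connectome"}) -> "Connectome"
#   parse_keywords(" Machine Learning ")    -> "machine learning"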
# Some keywords are in italic
keywords_updated = []
for kw in keywords:
keywords_updated.append(parse_keywords(kw))
# Also do a count
df = pandas.DataFrame(columns=["count"])
for f in files:
print "Adding %s to list" %(f)
result = json.load(open(f,'r'))
if "keywords" in result:
if not isinstance(result["keywords"],list):
kw_list = [result["keywords"]]
else:
kw_list = result["keywords"]
for kw in kw_list:
kw_parsed = parse_keywords(kw)
if kw_parsed in df.index:
df.loc[kw_parsed,"count"] = df.loc[kw_parsed,"count"] + 1
else:
df.loc[kw_parsed,"count"] = 1
# One badly parsed
badly_parsed = "computational biology ; protein structure prediction ; model quality assessment programs ; boltzmann distribution ; annsen's thermodynamic hypothesis ; statistical potentials ; protein backbone ; decoy sets ;"
df = df.drop([badly_parsed])
badly_parsed = [x.strip(" ") for x in badly_parsed.split(";") if x]
for bp in badly_parsed:
if bp in df.index:
df.loc[bp,"count"] = df.loc[bp,"count"] + 1
else:
df.loc[bp,"count"] = 1
# Sort by counts
df = df.sort(columns=["count"],ascending=False)
df.to_csv("%s/keywords_counts.tsv" %base,sep="\t",encoding="utf-8")
df.to_json("%s/keywords_counts.json" %base)
# JOURNALS
journals = []
for f in files:
print "Adding %s to list" %(f)
result = json.load(open(f,'r'))
pubmed_paper = str(result["pmid"])
urls = urls + result["github"]
if "journal" in result:
journals.append(result["journal"])
journals = numpy.unique(journals).tolist()
df=pandas.DataFrame(0,index=journals,columns=["count"])
for f in files:
print "Adding %s to list" %(f)
result = json.load(open(f,'r'))
if "journal" in result:
df.loc[result["journal"],"count"] = df.loc[result["journal"],"count"] + 1
df.to_csv("%s/journals_count.tsv" %base,sep="\t")
df.to_json("%s/journals_count.json" %base)
# We need a lookup, for pmids based on journal or keyword
journal_lookup = dict()
keyword_lookup = dict()
for f in files:
print "Parsing %s" %(f)
result = json.load(open(f,'r'))
pmid = str(result["pmid"])
if "journal" in result:
journal = result["journal"]
if journal not in journal_lookup:
journal_lookup[journal] = [pmid]
else:
journal_lookup[journal].append(pmid)
if "keywords" in result:
if not isinstance(result["keywords"],list):
keys = [result["keywords"]]
else:
keys = result["keywords"]
for k in keys:
kw = parse_keywords(k)
if kw not in keyword_lookup:
keyword_lookup[kw] = [pmid]
else:
keyword_lookup[kw].append(pmid)
save_json(journal_lookup,"%s/journal_lookup.json"%base)
save_json(keyword_lookup,"%s/keyword_lookup.json"%base)
| mit |
aje/POT | docs/source/auto_examples/plot_otda_mapping_colors_images.py | 2 | 4175 | # -*- coding: utf-8 -*-
"""
=====================================================
OT for image color adaptation with mapping estimation
=====================================================
OT for domain adaptation with image color adaptation [6] with mapping
estimation [8].
[6] Ferradans, S., Papadakis, N., Peyre, G., & Aujol, J. F. (2014). Regularized
discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3),
1853-1882.
[8] M. Perrot, N. Courty, R. Flamary, A. Habrard, "Mapping estimation for
discrete optimal transport", Neural Information Processing Systems (NIPS),
2016.
"""
# Authors: Remi Flamary <[email protected]>
# Stanislas Chambon <[email protected]>
#
# License: MIT License
import numpy as np
from scipy import ndimage
import matplotlib.pylab as pl
import ot
r = np.random.RandomState(42)
def im2mat(I):
"""Converts and image to matrix (one pixel per line)"""
return I.reshape((I.shape[0] * I.shape[1], I.shape[2]))
def mat2im(X, shape):
"""Converts back a matrix to an image"""
return X.reshape(shape)
def minmax(I):
return np.clip(I, 0, 1)
##############################################################################
# Generate data
# -------------
# Loading images
I1 = ndimage.imread('../data/ocean_day.jpg').astype(np.float64) / 256
I2 = ndimage.imread('../data/ocean_sunset.jpg').astype(np.float64) / 256
X1 = im2mat(I1)
X2 = im2mat(I2)
# training samples
nb = 1000
idx1 = r.randint(X1.shape[0], size=(nb,))
idx2 = r.randint(X2.shape[0], size=(nb,))
Xs = X1[idx1, :]
Xt = X2[idx2, :]
##############################################################################
# Domain adaptation for pixel distribution transfer
# -------------------------------------------------
# EMDTransport
ot_emd = ot.da.EMDTransport()
ot_emd.fit(Xs=Xs, Xt=Xt)
transp_Xs_emd = ot_emd.transform(Xs=X1)
Image_emd = minmax(mat2im(transp_Xs_emd, I1.shape))
# SinkhornTransport
ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn.fit(Xs=Xs, Xt=Xt)
transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=X1)
Image_sinkhorn = minmax(mat2im(transp_Xs_sinkhorn, I1.shape))
ot_mapping_linear = ot.da.MappingTransport(
mu=1e0, eta=1e-8, bias=True, max_iter=20, verbose=True)
ot_mapping_linear.fit(Xs=Xs, Xt=Xt)
X1tl = ot_mapping_linear.transform(Xs=X1)
Image_mapping_linear = minmax(mat2im(X1tl, I1.shape))
ot_mapping_gaussian = ot.da.MappingTransport(
mu=1e0, eta=1e-2, sigma=1, bias=False, max_iter=10, verbose=True)
ot_mapping_gaussian.fit(Xs=Xs, Xt=Xt)
X1tn = ot_mapping_gaussian.transform(Xs=X1) # use the estimated mapping
Image_mapping_gaussian = minmax(mat2im(X1tn, I1.shape))
##############################################################################
# Plot original images
# --------------------
pl.figure(1, figsize=(6.4, 3))
pl.subplot(1, 2, 1)
pl.imshow(I1)
pl.axis('off')
pl.title('Image 1')
pl.subplot(1, 2, 2)
pl.imshow(I2)
pl.axis('off')
pl.title('Image 2')
pl.tight_layout()
##############################################################################
# Plot pixel values distribution
# ------------------------------
pl.figure(2, figsize=(6.4, 5))
pl.subplot(1, 2, 1)
pl.scatter(Xs[:, 0], Xs[:, 2], c=Xs)
pl.axis([0, 1, 0, 1])
pl.xlabel('Red')
pl.ylabel('Blue')
pl.title('Image 1')
pl.subplot(1, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 2], c=Xt)
pl.axis([0, 1, 0, 1])
pl.xlabel('Red')
pl.ylabel('Blue')
pl.title('Image 2')
pl.tight_layout()
##############################################################################
# Plot transformed images
# -----------------------
pl.figure(3, figsize=(10, 5))
pl.subplot(2, 3, 1)
pl.imshow(I1)
pl.axis('off')
pl.title('Im. 1')
pl.subplot(2, 3, 4)
pl.imshow(I2)
pl.axis('off')
pl.title('Im. 2')
pl.subplot(2, 3, 2)
pl.imshow(Image_emd)
pl.axis('off')
pl.title('EmdTransport')
pl.subplot(2, 3, 5)
pl.imshow(Image_sinkhorn)
pl.axis('off')
pl.title('SinkhornTransport')
pl.subplot(2, 3, 3)
pl.imshow(Image_mapping_linear)
pl.axis('off')
pl.title('MappingTransport (linear)')
pl.subplot(2, 3, 6)
pl.imshow(Image_mapping_gaussian)
pl.axis('off')
pl.title('MappingTransport (gaussian)')
pl.tight_layout()
pl.show()
| mit |
kinimesi/rscore | RScore.py | 1 | 1582 | #!/usr/bin/env python
__author__ = 'ilkin safarli'
import string, pickle
from nltk import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
class RScore(object):
"""
This class calculates readability score - based on English word frequency.
"""
def __init__(self):
self.score = 0
self.freq = pickle.load(open("word_freq.db","rb"))
def clear_text(self, file_name):
"""
Clear all punctuations
:param file_name: name of file
:return sting without punctuations
"""
text = open(file_name, 'r').read()
text = text.lower()
no_punctuation = text.translate(None, string.punctuation)
return no_punctuation
def tokenize(self, text):
"""
Tokenize text into words.
:param text: string
:return: tokenized text
"""
tokens = word_tokenize(text)
return tokens
def tf_idf(self, file_name):
"""
Calculate tf-idf score.
:param file_name: name of file
:return: dictionary where keys are words and values are tf scores.
"""
tfidf = TfidfVectorizer(tokenizer=self.tokenize, stop_words='english', norm="l1")
tfs = tfidf.fit_transform([self.clear_text(file_name)]).toarray()
word_tfidf = dict(zip(tfidf.get_feature_names(), tfs[0]))
return word_tfidf
def rscore(self, file_name):
"""
Calculates readability score.
:param file_name: name of file
:return: readability score
"""
self.score = 0
word_tfidf = self.tf_idf(file_name)
for word in word_tfidf:
try:
self.score += self.freq[word]*word_tfidf[word]
except:
continue
return (self.score*10000.0)/len(word_tfidf) # scaling result
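# Minimal usage sketch (added): assumes 'word_freq.db' sits next to this
# script, as required by __init__, and that 'sample.txt' is any plain-text
# file to score; both file names are placeholders.
if __name__ == '__main__':
    scorer = RScore()
    print(scorer.rscore('sample.txt'))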
| apache-2.0 |
Balandat/cont_no_regret | old_code/Polynomial_Normbounds.py | 1 | 4213 | # Set up infrastructure and basic problem parameters
import matplotlib as mpl
mpl.use('Agg')
import multiprocessing as mp
import numpy as np
import datetime, os
from ContNoRegret.NoRegretAlgos import ContNoRegretProblem
from ContNoRegret.Domains import nBox, UnionOfDisjointnBoxes, DifferenceOfnBoxes, unitbox, hollowbox
from ContNoRegret.LossFunctions import random_PolynomialLosses, random_AffineLosses, random_QuadraticLosses, PolynomialLossFunction
from ContNoRegret.NoRegretAlgos import ContNoRegretProblem
from ContNoRegret.utils import CNR_worker, plot_results, save_results, circular_tour
from ContNoRegret.animate import save_animations
from ContNoRegret.Potentials import (ExponentialPotential, IdentityPotential, pNormPotential, CompositePotential,
ExpPPotential, pExpPotential, HuberPotential, LogtasticPotential, FractionalLinearPotential)
from ContNoRegret.loss_params import *
# this is the location of the folder for the results
results_path = '/home/max/Documents/CNR_results/'
desc = 'NIPS2_CNR_PolyNormBounds'
tmpfolder = '/media/tmp/' # if possible, choose this to be a RamDisk
# some flags for keeping a record of the simulation parameters
save_res = True
show_plots = False
save_anims = False
show_anims = False
#coeffs = coeffs + coeffs + coeffs
#exponents = exponents + exponents + exponents
T = 100 # Time horizon
L = 5.0 # Uniform bound on the Lipschitz constant
N = 2500 # Number of parallel algorithm instances
Ngrid = 500000 # Number of gridpoints for the sampling step
dom = unitbox(3)
nus = [0.05, 1]
# before running the computation, read this file so we can later save a copy in the results folder
with open(__file__, 'r') as f:
thisfile = f.read()
# lossfuncs = []
# while len(lossfuncs) < T:
# tmpfuncs = np.array(random_PolynomialLosses(dom, 10, M, L, 4, [0,1,2,3,4]))
# normbounds = {nu: np.array([lossfunc.norm(2/nu, tmpfolder=tmpfolder) for lossfunc in tmpfuncs]) for nu in nus}
# Ms = {nu: np.array(normbounds[nu]) for nu in nus}
# for i in range(len(normbounds)):
# if normbounds[nus[0]][i]/normbounds[nus[1]][i] > 5:
# lossfuncs.append(tmpfuncs[i])
# bootstrap by sampling from funcitons
idx = np.random.choice(len(coeffs), T)
coeffs = [coeffs[i] for i in idx]
exponents = [exponents[i] for i in idx]
lossfuncs = [PolynomialLossFunction(dom, coeff, expo) for coeff,expo in zip(coeffs,exponents)]
Minf, M2 = np.max(inf_norms), np.max(two_norms)
# create Continuous No-Regret problem
prob = ContNoRegretProblem(dom, lossfuncs, L, Minf, desc='PolyNormBounds')
# Select a number of potentials for the Dual Averaging algorithm
potentials = [ExponentialPotential(), pNormPotential(1.05, M=Minf), IdentityPotential(M=M2)]
#[ExponentialPotential(), pNormPotential(1.05, M=Minf), pNormPotential(2, M=M2)]
# the following runs fine if the script is the __main__ method, but crashes when running from ipython
pool = mp.Pool(processes=mp.cpu_count()-1)
processes = []
DAkwargs = [{'opt_rate':True, 'Ngrid':Ngrid, 'potential':pot, 'pid':i,
'tmpfolder':tmpfolder, 'label':'norm_'+pot.desc} for i,pot in enumerate(potentials)]
processes += [pool.apply_async(CNR_worker, (prob, N, 'DA'), kwarg) for kwarg in DAkwargs]
# wait for the processes to finish an collect the results
results = [process.get() for process in processes]
print(results)
# # plot results and/or save a persistent copy (pickled) of the detailed results
# timenow = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')# create a time stamp for unambiguously naming the results folder
# results_directory = '{}{}/'.format(results_path, timenow)
#
# if save_res:
# os.makedirs(results_directory, exist_ok=True) # this could probably use a safer implementation
# plot_results(results, 100, results_directory, show_plots)
# if save_anims:
# save_animations(results, 10, results_directory, show_anims)
# save_results(results, results_directory)
# # store the previously read-in contents of this file in the results folder
# with open(results_directory+str(__file__), 'w') as f:
# f.write(thisfile)
# else:
# plot_results(results, offset=100)
| mit |
idealabasu/code_pynamics | python/pynamics/signal_approx.py | 1 | 1067 | # -*- coding: utf-8 -*-
"""
Created on Wed May 5 15:01:40 2021
@author: danaukes
"""
import sympy
sympy.init_session(use_latex=False,quiet=True)
# from math import pi
from sympy import pi
from sympy import sin,cos,acos,atan
arctan = lambda x: atan(x)
arccos = lambda x: acos(x)
# from numpy import sin,cos,arccos,arctan
# import numpy.arccos as acos
import matplotlib.pyplot as plt
x = sympy.Symbol('x')
d = sympy.Symbol('d')
trg = 1 - 2*arccos((1 - d)*sin(2*pi*x))/pi
sqr = 2*arctan(sin(2*pi*x)/d)/pi
f_trg = lambda x,d:(1 - 2*arccos((1 - d)*sin(2*pi*x))/pi)
f_sqr = lambda x,d: (2*arctan(sin(2*pi*x)/d)/pi)
swt = ((1 + f_trg((2*x - 1)/4,d)*f_sqr(x/2,d))/2)
f_swt = lambda x,d: ((1 + f_trg((2*x - 1)/4,d)*f_sqr(x/2,d))/2)
if __name__=='__main__':
import numpy
x_num = numpy.r_[-2:2:.01]
d_num = .01
f_trg2 = sympy.lambdify((x,d),trg)
f_sqr2 = sympy.lambdify((x,d),sqr)
f_swt2 = sympy.lambdify((x,d),swt)
plt.plot(x_num,f_trg2(x_num,d_num))
plt.plot(x_num,f_sqr2(x_num,d_num))
plt.plot(x_num,f_swt2(x_num,d_num))
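    # Optional check (added sketch): away from the discontinuities the smooth
    # square wave approaches the ideal square wave sign(sin(2*pi*x)) as d_num
    # shrinks, so the gap printed below gets smaller for smaller d_num.
    ideal_sqr = numpy.sign(numpy.sin(2*numpy.pi*x_num))
    print(abs(f_sqr2(x_num,d_num) - ideal_sqr).max())
    plt.show()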
| mit |
trafferty/utils | python/parseDIFLog_plot.py | 1 | 6465 | #!/usr/bin/env python
import sys
import time
import re
import argparse
import json
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
def parseDIFLog(DIFLog):
'''
import re
p = re.compile(ur'\ (?P<start1_ts>[0-9:]*),\ (?P<dif_mem>[0-9.]*),\ (?P<dif_cpu>[0-9.]*),\ (?P<sys_mem>[0-9.]*)\n', re.DOTALL)
test_str = u"militho 5889 0.0 ? 24100 6844 /dev/pts/13 16:29 00:00 bash\nroot 5921 0.0 ? 89564 4572 /dev/pts/11 16:29 00:00 sudo\nmilitho 5922 0.0 ? 7208 1860 /dev/pts/11 16:29 00:04 tee\nroot 5923 0.0 0.9 180140 140808 /dev/pts/11 16:29 00:01 gdb\nroot 5925 91.3 20.1 4870632 3284572 /dev/pts/11 16:29 11:16 dif\nmilitho 5960 0.3 0.3 913356 41640 /dev/pts/12 16:29 00:58 python3\nmilitho 5992 0.1 0.1 47908 12636 /dev/pts/13 16:31 00:11 python3\nmilitho 5993 0.0 ? 7208 1752 /dev/pts/13 16:31 00:00 tee\nmilitho 6001 0.0 ? 24080 6828 /dev/pts/6 16:31 00:00 bash\nroot 6205 0.2 ? ? ? ? 17:28 00:08 kworker/u66:1\nroot 6229 0.0 ? ? ? ? 17:52 00:00 kworker/3:0\nroot 6230 0.0 ? ? ? ? 17:53 00:05 kworker/0:0\nroot 6240 0.0 ? ? ? ? 18:03 00:00 kworker/1:0\nroot 6242 0.0 ? ? ? ? 18:04 00:02 kworker/5:0\nroot 6249 0.0 ? ? ? ? 18:10 00:01 kworker/6:2\nroot 6255 0.0 ? ? ? ? 18:15 00:01 kworker/7:0\nroot 6261 0.0 ? ? ? ? 18:18 00:00 kworker/u65:0\nroot 6263 0.0 ? ? ? ? 18:24 00:00 kworker/2:0\nroot 6264 0.0 ? ? ? ? 18:25 00:07 kworker/4:2\nroot 6271 0.0 ? ? ? ? 18:33 00:02 kworker/0:2\nroot 6274 0.3 ? ? ? ? 18:36 00:07 kworker/4:3\nroot 6276 0.0 ? ? ? ? 18:39 00:00 kworker/u65:2\nroot 6277 0.0 ? ? ? ? 18:41 00:00 kworker/3:1\nroot 6278 0.0 ? ? ? ? 18:42 00:00 kworker/u66:2\nroot 6283 0.0 ? ? ? ? 18:50 00:00 kworker/u66:0\nNone\nWed 18:53:01, 20.083838, 91.80, 25.30\nWed 18:53:11, 20.216892, 98.90, 25.80\nWed 18:53:21, 20.678593, 90.40, 25.80\nWed 18:53:31, 20.810742, 93.00, 26.40\nWed 18:53:41, 21.272688, 88.00, 26.70\nWed 18:53:51, 21.602656, 95.20, 27.00\nWed 18:54:01, 21.867760, 85.90, 27.10\nWed 18:54:11, 21.999933, 93.80, 27.60\nWed 18:54:21, 22.462319, 86.50, 27.90\nWed 18:54:31, 22.791798, 92.90, 28.00\nUSER PID %CPU %MEM VSZ RSS TTY START TIME COMMAND\nroot 1 0.0 ? 33912 4256 ? 08:57 00:56 init\nroot 2 0.0 ? ? ? ? 08:57 00:00 kthreadd\nroot 3 0.0 ? ? ? ? 08:57 00:01 ksoftirqd/0\nroot 5 0.0 ? ? ? ? 08:57 00:00 kworker/0:0H\nroot 6 0.0 ? ? ? ? 08:57 00:00 kworker/u64:0\nroot 8 0.1 ? ? ? ? 08:57 01:43 rcu_sched\nroot 9 0.0 ? ? ? ? 08:57 00:00 rcu_bh\n"
re.findall(p, test_str) '''
DIF_ps_pattern=ur'\ (?P<log_ts>[0-9:]*),\ (?P<dif_mem>[0-9.]*),\ (?P<dif_cpu>[0-9.]*),\ (?P<sys_mem>[0-9.]*)\n'
f = open(DIFLog, 'r')
buf = f.read()
f.close()
print "File (%s) opened and read into buffer, length of buf: %d" % (DIFLog, len(buf))
DIF_ps_sets = [x.groupdict() for x in re.finditer(DIF_ps_pattern, buf)]
print "Parsing log for DIF_ps...found %d records." % (len(DIF_ps_sets))
if len(DIF_ps_sets) > 0: print " >> Timestamp range: %s - %s" % (DIF_ps_sets[0]['log_ts'], DIF_ps_sets[-1]['log_ts'])
timestamp_format = "%H:%M:%S"
start_ts = dt.datetime.strptime(DIF_ps_sets[0]['log_ts'], timestamp_format)
DIF_mem = []
DIF_cpu = []
sys_mem = []
elapsed_times = []
for idx, DIF_ps_set in enumerate(DIF_ps_sets):
'''
Wed 18:53:01, 20.083838, 91.80, 25.30
Wed 18:53:11, 20.216892, 98.90, 25.80
Wed 18:53:21, 20.678593, 90.40, 25.80
Wed 18:53:31, 20.810742, 93.00, 26.40
Wed 18:53:41, 21.272688, 88.00, 26.70
Wed 18:53:51, 21.602656, 95.20, 27.00
Wed 18:54:01, 21.867760, 85.90, 27.10
Wed 18:54:11, 21.999933, 93.80, 27.60
Wed 18:54:21, 22.462319, 86.50, 27.90
Wed 18:54:31, 22.791798, 92.90, 28.00
'''
#if idx % 1 == 0:
log_ts = dt.datetime.strptime(DIF_ps_set['log_ts'], timestamp_format)
delta = log_ts - start_ts
elapsed_times.append(delta.seconds/10)
        DIF_mem.append(float(DIF_ps_set['dif_mem']))
        DIF_cpu.append(float(DIF_ps_set['dif_cpu']))
        sys_mem.append(float(DIF_ps_set['sys_mem']))
DIF_mem_np = np.array(DIF_mem)
DIF_cpu_np = np.array(DIF_cpu)
sys_mem_np = np.array(sys_mem)
elapsed_times_np = np.array(elapsed_times)
print("%d -> %d" % (elapsed_times[0], elapsed_times[-1]))
xticks = np.arange(min(elapsed_times), max(elapsed_times)+1, len(elapsed_times)/12)
print(xticks)
print(len(DIF_mem))
if len(DIF_mem) > 0:
fig = plt.figure(figsize=(10*2,5))
ax = fig.add_subplot(111)
ax.set_title('DIF Percent Memory')
ax.set_ylabel('% mem')
ax.set_xlabel('Elapsed time (each point is 10s)')
#ax.xaxis.set_ticks(ange(len(elapsed_times)), elapsed_times_np)
ax.xaxis.set_ticks(xticks)
ax.plot(DIF_mem_np, color='b', label='DIF % mem')
ax.plot(sys_mem_np, color='g', label='System % mem')
ax.legend()
plt.show()
if __name__ == "__main__":
'''
parseDIFLog.py -i file_to_parse
'''
parser = argparse.ArgumentParser(description='open process log file, parse it according to parse function')
parser.add_argument('-i', '--in_file', dest='in_file', type=str,
help='input file...if not specified then use stdin')
args = parser.parse_args()
if args.in_file:
parseDIFLog(args.in_file)
else:
parser.print_help()
sys.exit(1)
| gpl-2.0 |
IntelPNI/brainiak | brainiak/factoranalysis/htfa.py | 6 | 28942 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hierarchical Topographical Factor Analysis (HTFA)
This implementation is based on the work in [Manning2014-1]_, [Manning2014-2]_,
[AndersonMJ2016]_, and [Manning2018]_.
.. [Manning2014-1] "Topographic factor analysis: a bayesian model for
inferring brain networks from neural data", J. R. Manning,
R. Ranganath, K. A. Norman, and D. M. Blei. PLoS One, vol. 9, no. 5,
2014.
.. [Manning2014-2] "Hierarchical topographic factor analysis", Jeremy. R.
Manning, R. Ranganath, W. Keung, N. B. Turk-Browne, J. D.Cohen,
K. A. Norman, and D. M. Blei. Pattern Recognition in Neuroimaging,
2014 International Workshop on, June 2014.
.. [Manning2018] "A Probabilistic Approach to Discovering Dynamic Full-brain
    Functional Connectivity Patterns", J. R. Manning, X. Zhu, T.L. Willke,
R. Ranganath, K. Stachenfeld, U. Hasson, D. M. Blei and K. A. Norman.
Neuroimage, 2018.
https://doi.org/10.1016/j.neuroimage.2018.01.071
.. [AndersonMJ2016] "Enabling Factor Analysis on Thousand-Subject Neuroimaging
Datasets",
Michael J. Anderson, Mihai Capotă, Javier S. Turek, Xia Zhu, Theodore L.
Willke, Yida Wang, Po-Hsuan Chen, Jeremy R. Manning, Peter J. Ramadge,
Kenneth A. Norman,
IEEE International Conference on Big Data, 2016.
https://doi.org/10.1109/BigData.2016.7840719
"""
# Authors: Xia Zhu (Intel Labs), Jeremy Manning (Dartmouth College) 2015~2016
import numpy as np
from mpi4py import MPI
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import mean_squared_error
from scipy.spatial import distance
import logging
from .tfa import TFA
from ..utils.utils import from_tri_2_sym, from_sym_2_tri
__all__ = [
"HTFA",
]
logger = logging.getLogger(__name__)
class HTFA(TFA):
"""Hierarchical Topographical Factor Analysis (HTFA)
Given multi-subject data, HTFA factorizes data from each subject as a
    spatial factor F and a weight matrix W per subject. At the top
    level, it also estimates a global template across subjects.
Parameters
----------
K : int
Number of factors to compute.
n_subj : int
Total number of subjects in dataset.
max_global_iter : int, default: 10
Number of global iterations to run the algorithm.
max_local_iter : int, default: 10
Number of local iterations to run on each subject within each
global interation.
threshold : float, default: 1.0
        Tolerance for terminating the parameter estimation
nlss_method : {'trf', 'dogbox', 'lm'}, default: 'trf'
        Non-Linear Least Squares (NLSS) algorithm used by scipy.least_squares to
perform minimization. More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
nlss_loss: str or callable, default: 'linear'
Loss function used by scipy.least_squares.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
jac : {'2-point', '3-point', 'cs', callable}, default: '2-point'
Method of computing the Jacobian matrix.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
x_scale : float or array_like or 'jac', default: 1.0
        Characteristic scale of each variable for scipy.least_squares.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
tr_solver: {None, 'exact', 'lsmr'}, default: None
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
weight_method : {'rr','ols'}, default: 'rr'
Method for estimating weight matrix W given X and F.
'rr' means ridge regression, 'ols' means ordinary least square.
upper_ratio : float, default: 1.8
The upper bound of the ratio between factor's width and brain diameter.
lower_ratio : float, default: 0.02
The lower bound of the ratio between factor's width and brain diameter.
voxel_ratio : float, default: 0.25
The percentage of voxels to sample in each inner iteration.
tr_ratio : float, default: 0.1
The percentage of trs to sample in each inner iteration.
max_voxel : int, default: 5000
The maximum number of voxels to sample in each inner iteration.
max_tr : int, default: 500
The maximum number of trs to sample in each inner iteration.
comm : Intracomm
MPI communication group, default MPI.COMM_WORLD
verbose : boolean, default: False
Verbose mode flag.
Attributes
----------
global_prior_ : 1D array
The global prior on mean and variance of centers and widths.
global_posterior_ : 1D array
The global posterior on mean and variance of centers and widths.
local_posterior_ : 1D array
Local posterior on centers and widths of subjects allocated
to this process.
local_weights_ : 1D array
Local posterior on weights allocated to this process.
Notes
-----
    We recommend using data in MNI space to better interpret the global template.
"""
def __init__(self, K, n_subj, max_global_iter=10, max_local_iter=10,
threshold=0.01, nlss_method='trf', nlss_loss='soft_l1',
jac='2-point', x_scale='jac', tr_solver=None,
weight_method='rr', upper_ratio=1.8, lower_ratio=0.02,
voxel_ratio=0.25, tr_ratio=0.1, max_voxel=5000, max_tr=500,
comm=MPI.COMM_WORLD, verbose=False):
self.K = K
self.n_subj = n_subj
self.max_global_iter = max_global_iter
self.max_local_iter = max_local_iter
self.threshold = threshold
self.nlss_method = nlss_method
self.nlss_loss = nlss_loss
self.jac = jac
self.x_scale = x_scale
self.tr_solver = tr_solver
self.weight_method = weight_method
self.upper_ratio = upper_ratio
self.lower_ratio = lower_ratio
self.voxel_ratio = voxel_ratio
self.tr_ratio = tr_ratio
self.max_voxel = max_voxel
self.max_tr = max_tr
self.comm = comm
self.verbose = verbose
def _converged(self):
"""Check convergence based on maximum absolute difference
Returns
-------
converged : boolean
Whether the parameter estimation converged.
max_diff : float
Maximum absolute difference between prior and posterior.
"""
prior = self.global_prior_[0:self.prior_size]
posterior = self.global_posterior_[0:self.prior_size]
diff = prior - posterior
max_diff = np.max(np.fabs(diff))
if self.verbose:
_, mse = self._mse_converged()
diff_ratio = np.sum(diff ** 2) / np.sum(posterior ** 2)
logger.info(
'htfa prior posterior max diff %f mse %f diff_ratio %f' %
((max_diff, mse, diff_ratio)))
if max_diff > self.threshold:
return False, max_diff
else:
return True, max_diff
def _mse_converged(self):
"""Check convergence based on mean squared difference between
prior and posterior
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior.
"""
prior = self.global_prior_[0:self.prior_size]
posterior = self.global_posterior_[0:self.prior_size]
mse = mean_squared_error(prior, posterior,
multioutput='uniform_average')
if mse > self.threshold:
return False, mse
else:
return True, mse
def _map_update(
self,
prior_mean,
prior_cov,
global_cov_scaled,
new_observation):
"""Maximum A Posterior (MAP) update of a parameter
Parameters
----------
prior_mean : float or 1D array
Prior mean of parameters.
prior_cov : float or 1D array
Prior variance of scalar parameter, or
prior covariance of multivariate parameter
global_cov_scaled : float or 1D array
Global prior variance of scalar parameter, or
global prior covariance of multivariate parameter
new_observation : 1D or 2D array, with shape [n_dim, n_subj]
New observations on parameters.
Returns
-------
posterior_mean : float or 1D array
Posterior mean of parameters.
posterior_cov : float or 1D array
Posterior variance of scalar parameter, or
posterior covariance of multivariate parameter
"""
common = np.linalg.inv(prior_cov + global_cov_scaled)
observation_mean = np.mean(new_observation, axis=1)
posterior_mean = prior_cov.dot(common.dot(observation_mean)) +\
global_cov_scaled.dot(common.dot(prior_mean))
posterior_cov =\
prior_cov.dot(common.dot(global_cov_scaled))
return posterior_mean, posterior_cov
def _map_update_posterior(self):
"""Maximum A Posterior (MAP) update of HTFA parameters
Returns
-------
HTFA
Returns the instance itself.
"""
self.global_posterior_ = self.global_prior_.copy()
prior_centers = self.get_centers(self.global_prior_)
prior_widths = self.get_widths(self.global_prior_)
prior_centers_mean_cov = self.get_centers_mean_cov(self.global_prior_)
prior_widths_mean_var = self.get_widths_mean_var(self.global_prior_)
center_size = self.K * self.n_dim
posterior_size = center_size + self.K
for k in np.arange(self.K):
next_centers = np.zeros((self.n_dim, self.n_subj))
next_widths = np.zeros(self.n_subj)
for s in np.arange(self.n_subj):
center_start = s * posterior_size
width_start = center_start + center_size
start_idx = center_start + k * self.n_dim
end_idx = center_start + (k + 1) * self.n_dim
next_centers[:, s] = self.gather_posterior[start_idx:end_idx]\
.copy()
next_widths[s] = self.gather_posterior[width_start + k].copy()
# centers
posterior_mean, posterior_cov = self._map_update(
prior_centers[k].T.copy(),
from_tri_2_sym(prior_centers_mean_cov[k], self.n_dim),
self.global_centers_cov_scaled,
next_centers)
self.global_posterior_[k * self.n_dim:(k + 1) * self.n_dim] =\
posterior_mean.T
start_idx = self.map_offset[2] + k * self.cov_vec_size
end_idx = self.map_offset[2] + (k + 1) * self.cov_vec_size
self.global_posterior_[start_idx:end_idx] =\
from_sym_2_tri(posterior_cov)
# widths
common = 1.0 /\
(prior_widths_mean_var[k] + self.global_widths_var_scaled)
observation_mean = np.mean(next_widths)
tmp = common * self.global_widths_var_scaled
self.global_posterior_[self.map_offset[1] + k] = \
prior_widths_mean_var[k] * common * observation_mean +\
tmp * prior_widths[k]
self.global_posterior_[self.map_offset[3] + k] = \
prior_widths_mean_var[k] * tmp
return self
def _get_gather_offset(self, size):
"""Calculate the offset for gather result from this process
Parameters
----------
size : int
The total number of process.
Returns
-------
tuple_size : tuple_int
Number of elements to send from each process
(one integer for each process)
tuple_offset : tuple_int
Number of elements away from the first element
in the array at which to begin the new, segmented
array for a process
(one integer for each process)
subject_map : dictionary
Mapping between global subject id to local id
"""
gather_size = np.zeros(size).astype(int)
gather_offset = np.zeros(size).astype(int)
num_local_subjs = np.zeros(size).astype(int)
subject_map = {}
for idx, s in enumerate(np.arange(self.n_subj)):
cur_rank = idx % size
gather_size[cur_rank] += self.prior_size
subject_map[idx] = (cur_rank, num_local_subjs[cur_rank])
num_local_subjs[cur_rank] += 1
for idx in np.arange(size - 1) + 1:
gather_offset[idx] = gather_offset[idx - 1] + gather_size[idx - 1]
tuple_size = tuple(gather_size)
tuple_offset = tuple(gather_offset)
return tuple_size, tuple_offset, subject_map
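    # Worked example (illustrative): with prior_size=4, n_subj=5 and size=3,
    # subjects are dealt out round-robin, so ranks 0/1/2 hold 2/2/1 subjects,
    # giving tuple_size=(8, 8, 4), tuple_offset=(0, 8, 16) and
    # subject_map={0: (0, 0), 1: (1, 0), 2: (2, 0), 3: (0, 1), 4: (1, 1)}.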
def _get_weight_size(self, data, n_local_subj):
"""Calculate the size of weight for this process
Parameters
----------
data : a list of 2D array, each in shape [n_voxel, n_tr]
            The fMRI data from multiple subjects.
n_local_subj : int
Number of subjects allocated to this process.
Returns
-------
weight_size : 1D array
The size of total subject weight on this process.
local_weight_offset : 1D array
Number of elements away from the first element
in the combined weight array at which to begin
the new, segmented array for a subject
"""
weight_size = np.zeros(1).astype(int)
local_weight_offset = np.zeros(n_local_subj).astype(int)
for idx, subj_data in enumerate(data):
if idx > 0:
local_weight_offset[idx] = weight_size[0]
weight_size[0] += self.K * subj_data.shape[1]
return weight_size, local_weight_offset
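    # Worked example (illustrative): with K=5 and two local subjects whose
    # data have 50 and 60 TRs respectively, weight_size=[550]
    # (5*50 + 5*60) and local_weight_offset=[0, 250].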
def _get_subject_info(self, n_local_subj, data):
"""Calculate metadata for subjects allocated to this process
Parameters
----------
n_local_subj : int
Number of subjects allocated to this process.
        data : list of 2D arrays, element i has shape=[n_voxel, n_tr]
            The fMRI data of the subjects allocated to this process.
Returns
-------
        max_sample_tr : 1D array
            Maximum number of TRs to subsample for each subject
        max_sample_voxel : 1D array
            Maximum number of voxels to subsample for each subject
"""
max_sample_tr = np.zeros(n_local_subj).astype(int)
max_sample_voxel = np.zeros(n_local_subj).astype(int)
for idx in np.arange(n_local_subj):
nvoxel = data[idx].shape[0]
ntr = data[idx].shape[1]
max_sample_voxel[idx] =\
min(self.max_voxel, int(self.voxel_ratio * nvoxel))
max_sample_tr[idx] = min(self.max_tr, int(self.tr_ratio * ntr))
return max_sample_tr, max_sample_voxel
def _get_mpi_info(self):
"""get basic MPI info
Returns
-------
comm : Intracomm
Returns MPI communication group
rank : integer
Returns the rank of this process
size : integer
Returns total number of processes
"""
rank = self.comm.Get_rank()
size = self.comm.Get_size()
return rank, size
def _init_prior_posterior(self, rank, R, n_local_subj):
"""set prior for this subject
Parameters
----------
rank : integer
The rank of this process
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
The number of subjects allocated to this process.
Returns
-------
HTFA
Returns the instance itself.
"""
if rank == 0:
idx = np.random.choice(n_local_subj, 1)
self.global_prior_, self.global_centers_cov,\
self.global_widths_var = self.get_template(R[idx[0]])
self.global_centers_cov_scaled =\
self.global_centers_cov / float(self.n_subj)
self.global_widths_var_scaled =\
self.global_widths_var / float(self.n_subj)
self.gather_posterior = np.zeros(self.n_subj * self.prior_size)
self.global_posterior_ = np.zeros(self.prior_size)
else:
self.global_prior_ = np.zeros(self.prior_bcast_size)
self.global_posterior_ = None
self.gather_posterior = None
return self
def _gather_local_posterior(self, use_gather,
gather_size, gather_offset):
"""Gather/Gatherv local posterior
Parameters
----------
use_gather : boolean
Whether to use Gather or Gatherv
gather_size : 1D array
The size of each local posterior
gather_offset : 1D array
The offset of each local posterior
Returns
-------
HTFA
Returns the instance itself.
Notes
-----
We use numpy array rather than generic Python objects for MPI
communication because Gatherv is only supported for the former.
https://pythonhosted.org/mpi4py/usrman/tutorial.html
"""
if use_gather:
self.comm.Gather(self.local_posterior_,
self.gather_posterior, root=0)
else:
target = [
self.gather_posterior,
gather_size,
gather_offset,
MPI.DOUBLE]
self.comm.Gatherv(self.local_posterior_, target)
return self
def _assign_posterior(self):
"""assign posterior to the right prior based on
Hungarian algorithm
Returns
-------
HTFA
Returns the instance itself.
"""
prior_centers = self.get_centers(self.global_prior_)
posterior_centers = self.get_centers(self.global_posterior_)
posterior_widths = self.get_widths(self.global_posterior_)
posterior_centers_mean_cov =\
self.get_centers_mean_cov(self.global_posterior_)
posterior_widths_mean_var =\
self.get_widths_mean_var(self.global_posterior_)
# linear assignment on centers
cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
_, col_ind = linear_sum_assignment(cost)
        # reorder centers/widths based on cost assignment
        self.set_centers(self.global_posterior_, posterior_centers[col_ind])
        self.set_widths(self.global_posterior_, posterior_widths[col_ind])
# reorder cov/var based on cost assignment
self.set_centers_mean_cov(
self.global_posterior_,
posterior_centers_mean_cov[col_ind])
self.set_widths_mean_var(
self.global_posterior_,
posterior_widths_mean_var[col_ind])
return self
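    # Illustrative sketch of the assignment step above, assuming the module
    # uses scipy.optimize.linear_sum_assignment (Hungarian algorithm):
    # >>> import numpy as np
    # >>> from scipy.optimize import linear_sum_assignment
    # >>> cost = np.array([[4., 1.], [2., 8.]])
    # >>> _, col_ind = linear_sum_assignment(cost)
    # >>> col_ind   # prior factor 0 matches posterior factor 1, and vice versa
    # array([1, 0])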
def _update_global_posterior(
self, rank, m, outer_converged):
"""Update global posterior and then check convergence
Parameters
----------
rank : integer
The rank of current process.
m : integer
The outer iteration number of HTFA.
outer_converged : 1D array
Record whether HTFA loop converged
Returns
-------
        outer_converged : 1D array
            Single-element array (kept as an array for MPI broadcast);
            1 means HTFA converged, 0 means not converged.
"""
if rank == 0:
self._map_update_posterior()
self._assign_posterior()
is_converged, _ = self._converged()
if is_converged:
logger.info("converged at %d outer iter" % (m))
outer_converged[0] = 1
else:
self.global_prior_ = self.global_posterior_
return outer_converged
def _update_weight(self, data, R, n_local_subj, local_weight_offset):
"""update local weight
Parameters
----------
data : list of 2D array, element i has shape=[n_voxel, n_tr]
Subjects' fMRI data.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
Number of subjects allocated to this process.
local_weight_offset : 1D array
Offset of each subject's weights on this process.
Returns
-------
HTFA
Returns the instance itself.
"""
for s, subj_data in enumerate(data):
base = s * self.prior_size
centers = self.local_posterior_[base:base + self.K * self.n_dim]\
.reshape((self.K, self.n_dim))
start_idx = base + self.K * self.n_dim
end_idx = base + self.prior_size
widths = self.local_posterior_[start_idx:end_idx]\
.reshape((self.K, 1))
unique_R, inds = self.get_unique_R(R[s])
F = self.get_factors(unique_R, inds, centers, widths)
start_idx = local_weight_offset[s]
if s == n_local_subj - 1:
self.local_weights_[start_idx:] =\
self.get_weights(subj_data, F).ravel()
else:
end_idx = local_weight_offset[s + 1]
self.local_weights_[start_idx:end_idx] =\
self.get_weights(subj_data, F).ravel()
return self
def _fit_htfa(self, data, R):
"""HTFA main algorithm
Parameters
----------
data : list of 2D array. Each in shape [n_voxel, n_tr]
The fMRI data from multiple subjects.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
rank, size = self._get_mpi_info()
        use_gather = self.n_subj % size == 0
n_local_subj = len(R)
max_sample_tr, max_sample_voxel =\
self._get_subject_info(n_local_subj, data)
tfa = []
# init tfa for each subject
for s, subj_data in enumerate(data):
tfa.append(TFA(
max_iter=self.max_local_iter,
threshold=self.threshold,
K=self.K,
nlss_method=self.nlss_method,
nlss_loss=self.nlss_loss,
x_scale=self.x_scale,
tr_solver=self.tr_solver,
weight_method=self.weight_method,
upper_ratio=self.upper_ratio,
lower_ratio=self.lower_ratio,
verbose=self.verbose,
max_num_tr=max_sample_tr[s],
max_num_voxel=max_sample_voxel[s]))
# map data to processes
gather_size, gather_offset, subject_map =\
self._get_gather_offset(size)
self.local_posterior_ = np.zeros(n_local_subj * self.prior_size)
self._init_prior_posterior(rank, R, n_local_subj)
node_weight_size, local_weight_offset =\
self._get_weight_size(data, n_local_subj)
self.local_weights_ = np.zeros(node_weight_size[0])
m = 0
outer_converged = np.array([0])
while m < self.max_global_iter and not outer_converged[0]:
            if self.verbose:
logger.info("HTFA global iter %d " % (m))
# root broadcast first 4 fields of global_prior to all nodes
self.comm.Bcast(self.global_prior_, root=0)
# each node loop over its data
for s, subj_data in enumerate(data):
# update tfa with current local prior
tfa[s].set_prior(self.global_prior_[0:self.prior_size].copy())
tfa[s].set_seed(m * self.max_local_iter)
tfa[s].fit(
subj_data,
R=R[s],
template_prior=self.global_prior_.copy())
tfa[s]._assign_posterior()
start_idx = s * self.prior_size
end_idx = (s + 1) * self.prior_size
self.local_posterior_[start_idx:end_idx] =\
tfa[s].local_posterior_
self._gather_local_posterior(
use_gather,
gather_size,
gather_offset)
# root updates global_posterior
outer_converged =\
self._update_global_posterior(rank, m, outer_converged)
self.comm.Bcast(outer_converged, root=0)
m += 1
# update weight matrix for each subject
self._update_weight(
data,
R,
n_local_subj,
local_weight_offset)
return self
def _check_input(self, X, R):
"""Check whether input data and coordinates in right type
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
# Check data type
if not isinstance(X, list):
raise TypeError("Input data should be a list")
if not isinstance(R, list):
raise TypeError("Coordinates should be a list")
# Check the number of subjects
if len(X) < 1:
raise ValueError("Need at leat one subject to train the model.\
Got {0:d}".format(len(X)))
for idx, x in enumerate(X):
if not isinstance(x, np.ndarray):
raise TypeError("Each subject data should be an array")
if x.ndim != 2:
raise TypeError("Each subject data should be 2D array")
if not isinstance(R[idx], np.ndarray):
raise TypeError(
"Each scanner coordinate matrix should be an array")
if R[idx].ndim != 2:
raise TypeError(
"Each scanner coordinate matrix should be 2D array")
if x.shape[0] != R[idx].shape[0]:
raise TypeError(
"n_voxel should be the same in X[idx] and R[idx]")
return self
def fit(self, X, R):
"""Compute Hierarchical Topographical Factor Analysis Model
[Manning2014-1][Manning2014-2]
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
self._check_input(X, R)
if self.verbose:
logger.info("Start to fit HTFA")
self.n_dim = R[0].shape[1]
self.cov_vec_size = np.sum(np.arange(self.n_dim) + 1)
# centers,widths
self.prior_size = self.K * (self.n_dim + 1)
# centers,widths,centerCov,widthVar
self.prior_bcast_size =\
self.K * (self.n_dim + 2 + self.cov_vec_size)
self.get_map_offset()
self._fit_htfa(X, R)
return self
| apache-2.0 |
gnsiva/Amphitrite | lib/utils.py | 1 | 10740 | """Utility functions for handling data."""
import numpy as np
import matplotlib.pyplot as plt
import math
import cPickle as pickle
def weightedAverageAndStd(values,weights):
"""Calculate the weighted average and the weighted standard deviation.
:parameter values: x axis usually
:parameter weights: y axis values
:returns: average, standard deviation
"""
    # dealing with nan's
    if np.isnan(values).any():
        # build the mask first so weights are filtered against the
        # original values array (fancy indexing returns new arrays,
        # so the caller's arrays are not altered)
        nanMask = np.isnan(values)
        values = values[~nanMask]
        weights = weights[~nanMask]
average = np.average(values,weights=weights)
variance = np.dot(weights, (values-average)**2)/weights.sum()
return average,math.sqrt(variance)
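# Illustrative example (not part of the original module):
# >>> weightedAverageAndStd(np.array([1., 2., 3.]), np.array([1., 1., 2.]))
# (2.25, 0.8291561975888499)   # weighted mean 2.25, std = sqrt(0.6875)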
def closest(target,xs):
"""Find the index of the value closest to the target in an array
:parameter target: Value you want the index for
:parameter xs: The array to find the target in
"""
return np.argmin(np.abs(xs-target))
def gaussian(mzs,amp,mu,fwhh):
"""Calculate a three parameter Gaussian distribution.
:parameter mzs: x axis (numpy array or float)
:parameter amp: Amplitude of distribution
:parameter mu: Mean/centre of the distribution
    :parameter fwhh: Width of distribution (full width half maximum)
"""
return amp*np.exp((-(mzs-mu)**2)/(2*(fwhh/2.3548200450309493)**2))
def lorentzian(mzs,amp,mu,fwhh):
"""Calculate a three parameter Lorentzian (Cauchy) distribution.
:parameter mzs: x axis (numpy array or float)
:parameter amp: Amplitude of distribution
:parameter mu: Mean/centre of the distribution
    :parameter fwhh: Width of distribution (full width half maximum)
"""
return amp*1/(np.abs(1+((mu-mzs)/(fwhh/2))**2))
def hybrid(mzs,amp,mu,fwhh):
"""Calculate a three parameter hybrid distribution. Distribution is
Gaussian at values less than the mean and Lorentzian above it.
:parameter mzs: x axis (numpy array or float)
:parameter amp: Amplitude of distribution
:parameter mu: Mean/centre of the distribution
    :parameter fwhh: Width of distribution (full width half maximum)
"""
ys = mzs.copy()
ys[mzs<=mu] = amp*np.exp((-(mzs[mzs<=mu]-mu)**2)/(2*(fwhh/(2*np.sqrt(2*np.log(2))))**2))
ys[mzs>mu] = amp*1/(np.abs(1+((mu-mzs[mzs>mu])/(fwhh/2))**2))
return ys
draw_peaks = {'gaussian':gaussian,
'lorentzian':lorentzian,
'hybrid':hybrid}
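# Example usage of the peak-shape lookup above (illustrative sketch):
# >>> mzs = np.linspace(990., 1010., 201)
# >>> ys = draw_peaks['gaussian'](mzs, amp=1.0, mu=1000.0, fwhh=2.0)
# All three functions share the (mzs, amp, mu, fwhh) signature, so calling
# code can switch peak shapes by name through this dictionary.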
def get_mz(mass,n):
"""Calculate the m/z value given the mass and charge.
:parameter mass: Mass of protein/molecule
:parameter n: Charge state
:returns: m/z value (Th)
"""
return float(mass)/n + 1.008
def get_mass(mz,n):
"""Calculate the mass given m/z value and charge state.
:parameter mz: m/z ratio (Th)
:parameter n: Charge state
:returns: mass (Da)
"""
return n*float(mz) - (n*1.008)
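# Worked example (illustrative): a 10,000 Da protein carrying 10 charges
# appears at get_mz(10000, 10) = 1001.008 Th, and
# get_mass(1001.008, 10) = 10000.0 Da recovers the neutral mass.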
def localMaxima(start_index, xs, ys, scan_range=10):
"""Scan range to look for maximum.
If int - value to look to the right (index).
If list [0] is to left, [1] is to right.
"""
if type(scan_range).__name__ == 'int':
end_index = start_index+scan_range
truncated_max_i = ys[start_index:end_index].argmax()
max_i = truncated_max_i + start_index
else:
left_index = start_index - scan_range[0]
end_index = start_index+scan_range[1]
truncated_max_i = ys[left_index:end_index].argmax()
max_i = truncated_max_i + left_index
return max_i
colourList = ['gray','b','purple','g','brown','m','c']*20
colourList2 = ['r','b','g','gray','purple','brown','m','c']*20
protonMass = 1.0078
legendFontSize = 'small'
#####################################
# IM calibration functions
#####################################
# correcting td values
def _calculateTdPrime(td,waveVelocity):
tIndependent = (61+31.)*(0.010*(300./waveVelocity))
return td - tIndependent
def _calculateTdDoublePrime(tdPrime,mz):
tDependent = (np.sqrt(mz/1000.)*(0.044+0.041))
return tdPrime - tDependent
# correcting CCS values
def _calculateReducedMass(mz,charge,gas='Nitrogen'):
mIon = charge*(mz-protonMass)
gas = gas.lower()
if gas == 'nitrogen':
mGas = 28.0134
elif gas == 'helium':
mGas = 4.002
else:
        raise ValueError('Unknown gas: %s' % gas)
return (mIon*mGas)/(mIon+mGas)
def _calculateOmegaPrime(omega,charge,reducedMass):
return omega/(charge*np.sqrt(1/reducedMass))
#####################################
# Matplotlib functions
#####################################
def isMplColour(colour):
"""Test if colour is one of the matplotlib full name
colors or single letter code.
:parameter colour: Colour string to test
:returns: Boolean
"""
words = ["blue","green","red","cyan","magenta","yellow","black","brown","purple","gray","orange"]
codes = ['b','g','r','c','m','y','k']
colour = colour.lower()
mplColour = False
if colour in words:
mplColour = True
elif colour in codes:
mplColour = True
return mplColour
def findFirstNonZeroXvalue(x,y,zero=0):
"""Numpy arrays only, finds first y value
above zero and returns the corresponding x value
:parameter x: x axis values
:parameter y: y axis values
    :parameter zero: Use this to change the threshold,
        e.g. zero=1 would return the x value of the first y value above 1.
"""
nonZeroX = x[y>zero]
return nonZeroX.min()
def findLastNonZeroXvalue(x,y,zero=0):
"""Numpy arrays only, finds last y value
above zero and returns the corresponding x value
:parameter x: x axis values
:parameter y: y axis values
    :parameter zero: Use this to change the threshold,
        e.g. zero=1 would return the x value of the last y value above 1.
"""
    nonZeroX = x[y>zero]
return nonZeroX.max()
#####################################
# Value checking and converting functions
#####################################
def commaNumber(intOrFloat):
return "{:,}".format(intOrFloat)
def isBinaryResponse(s):
    positive = ['true','1','yes']
    negative = ['false','0','no']
isBinary = False
if s.lower() in positive:
isBinary = True
elif s.lower() in negative:
isBinary = True
else:
isBinary = 'Error'
return isBinary
def getBinaryReponse(s):
    positive = ['true','1','yes']
    negative = ['false','0','no']
if s.lower() in positive:
value = True
elif s.lower() in negative:
value = False
else:
value = 'Error'
print s
return value
def isNumber(s):
try:
float(s)
return True
except:
return False
def isInDir(folder,fileNames):
# TODO(gns) - get rid of this, it's terrible. Use isfile() or exists()
import os
allFound = True
for f in fileNames:
if not f in os.listdir(folder):
allFound = False
return allFound
def getHyphenCommaList(s):
"""Convert complicated number strings which can include
commas and hyphens e.g. '1,2,5-7' == [1,2,5,6,7].
:parameter s: String to test
:returns: False if there is a problem, or an list of converted ints
"""
output = []
    s = s.rstrip(',')
import re
# check for bs characters
if not re.search('[^ 0-9,-]',s):
for x in s.split(','):
elem = x.split('-')
if len(elem) == 1: # a number
output.append(int(elem[0]))
elif len(elem) == 2: # a range inclusive
start, end = map(int, elem)
extremes = sorted([start,end])
for i in xrange(extremes[0], extremes[1]+1):
output.append(i)
else: # more than one hyphen
return False
return output
else:
return False
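# Illustrative examples (not part of the original module):
# >>> getHyphenCommaList('1,2,5-7')
# [1, 2, 5, 6, 7]
# >>> getHyphenCommaList('1-2-3')   # more than one hyphen in a field
# False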
######################################################################
# Amphitrite data files
######################################################################
def pickleAmphitriteProject(filename,xAxis,yAxis,mobility):
"""Create an Amphitrite data file.
:parameter filename: Absolute path and filename for data file
:parameter xAxis: m/z axis
:parameter yAxis: Arrival time axis
:parameter mobility: Intensity matrix
"""
npObj = np.zeros(3,dtype=np.object)
npObj[0] = xAxis
npObj[1] = yAxis
npObj[2] = mobility
npObj.dump(filename)
def unPickleAmphitriteProject(filename):
"""Open an Amphitrite data file, and check that the format
is correct.
:parameter filename: Absolute path to data file
:returns: mzAxis, arrival time axis and intensity matrix as a list
"""
try:
dataList = np.load(filename)
# xaxis, yaxis, matrix
return [dataList[0],dataList[1],dataList[2]]
except:
print 'Opening amphitrite file failed: %s' %filename
return False
######################################################################
# Plotting functions
######################################################################
def label1dPlots(ax,lift,values,units,alignment='right'):
"""Label stacked plots, usually mass spectra or arrival time
distributions.
:parameter ax: Matplotlib Axes instance
:parameter lift: The vertical spacing between traces
:parameter values: List of values to label the traces with
:parameter units: Unit to display next to values (string)
:parameter alignment: Where to place the labels ('left', 'right' or 'center')
"""
# get y positions
yheights = [i*lift + (lift*0.1) for i in xrange(len(values))]
# get x position
xlims = ax.get_xlim()
x_range = xlims[1]-xlims[0]
xposition = (x_range*0.95) + xlims[0]
# check if the values are all integers
ints = True
for i,value in enumerate(values):
if value:
if value%1 != 0:
ints = False
# draw the labels
for i,value in enumerate(values):
if value:
if ints:
s = "%.0f %s" %(value,units)
else:
s = "%s %s" %(value,units)
ax.annotate(s, (xposition,yheights[i]),
horizontalalignment=alignment,color='k')
def checkAx(ax):
"""Check if ax is a matplotlib axis object
If it is, just return it back
If it isn't create one and return it
:parameter ax: Unknown object (usually False or Matplotlib Axes instance)
:returns: Matplotlib Axes instance
"""
if type(ax).__name__ == 'AxesSubplot':
return ax
else:
f = plt.figure()
return f.add_subplot(111)
| gpl-2.0 |
AlexanderFabisch/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
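# Illustrative aside (not part of the original example): RobustScaler centers
# each feature on its median and scales by the IQR, so the single -1000
# outlier above barely shifts the robust statistics, whereas it dominates the
# mean/std used by StandardScaler. A minimal sketch:
# >>> RobustScaler().fit(np.array([[1.], [2.], [3.], [-1000.]])).center_
# array([ 1.5])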
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
elkingtonmcb/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
khkaminska/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
ahoyosid/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
akhilaananthram/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/quiver.py | 69 | 36790 | """
Support for plotting vector fields.
Presently this contains Quiver and Barb. Quiver plots an arrow in the
direction of the vector, with the size of the arrow related to the
magnitude of the vector.
Barbs are like quiver in that they point along a vector, but
the magnitude of the vector is given schematically by the presence of barbs
or flags on the barb.
This will also become a home for things such as standard
deviation ellipses, which can and will be derived very easily from
the Quiver code.
"""
import numpy as np
from numpy import ma
import matplotlib.collections as collections
import matplotlib.transforms as transforms
import matplotlib.text as mtext
import matplotlib.artist as martist
import matplotlib.font_manager as font_manager
from matplotlib.cbook import delete_masked_points
from matplotlib.patches import CirclePolygon
import math
_quiver_doc = """
Plot a 2-D field of arrows.
call signatures::
quiver(U, V, **kw)
quiver(U, V, C, **kw)
quiver(X, Y, U, V, **kw)
quiver(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the arrow locations (default is tail of
arrow; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the arrow vectors
*C*:
an optional array used to map colors to the arrows
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*units*: ['width' | 'height' | 'dots' | 'inches' | 'x' | 'y' ]
arrow units; the arrow dimensions *except for length* are in
multiples of this unit.
* 'width' or 'height': the width or height of the axes
* 'dots' or 'inches': pixels or inches, based on the figure dpi
* 'x' or 'y': *X* or *Y* data units
The arrows scale differently depending on the units. For
'x' or 'y', the arrows get larger as one zooms in; for other
units, the arrow size is independent of the zoom state. For
'width or 'height', the arrow size increases with the width and
height of the axes, respectively, when the the window is resized;
for 'dots' or 'inches', resizing does not change the arrows.
*angles*: ['uv' | 'xy' | array]
With the default 'uv', the arrow aspect ratio is 1, so that
if *U*==*V* the angle of the arrow on the plot is 45 degrees
CCW from the *x*-axis.
With 'xy', the arrow points from (x,y) to (x+u, y+v).
Alternatively, arbitrary angles may be specified as an array
of values in degrees, CCW from the *x*-axis.
*scale*: [ None | float ]
data units per arrow unit, e.g. m/s per plot width; a smaller
scale parameter makes the arrow longer. If *None*, a simple
autoscaling algorithm is used, based on the average vector length
and the number of vectors.
*width*:
shaft width in arrow units; default depends on choice of units,
above, and number of vectors; a typical starting value is about
0.005 times the width of the plot.
*headwidth*: scalar
head width as multiple of shaft width, default is 3
*headlength*: scalar
head length as multiple of shaft width, default is 5
*headaxislength*: scalar
head length at shaft intersection, default is 4.5
*minshaft*: scalar
length below which arrow scales, in units of head length. Do not
set this to less than 1, or small arrows will look terrible!
Default is 1
*minlength*: scalar
minimum length as a multiple of shaft width; if an arrow length
is less than this, plot a dot (hexagon) of this diameter instead.
Default is 1.
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*.
*color*: [ color | color sequence ]
This is a synonym for the
:class:`~matplotlib.collections.PolyCollection` facecolor kwarg.
If *C* has been set, *color* has no effect.
The defaults give a slightly swept-back arrow; to make the head a
triangle, make *headaxislength* the same as *headlength*. To make the
arrow more pointed, reduce *headwidth* or increase *headlength* and
*headaxislength*. To make the head smaller relative to the shaft,
scale down all the head parameters. You will probably do best to leave
minshaft alone.
linewidths and edgecolors can be used to customize the arrow
outlines. Additional :class:`~matplotlib.collections.PolyCollection`
keyword arguments:
%(PolyCollection)s
""" % martist.kwdocd
_quiverkey_doc = """
Add a key to a quiver plot.
call signature::
quiverkey(Q, X, Y, U, label, **kw)
Arguments:
*Q*:
The Quiver instance returned by a call to quiver.
*X*, *Y*:
The location of the key; additional explanation follows.
*U*:
The length of the key
*label*:
a string with the length and units of the key
Keyword arguments:
*coordinates* = [ 'axes' | 'figure' | 'data' | 'inches' ]
Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
normalized coordinate systems with 0,0 in the lower left and 1,1
in the upper right; 'data' are the axes data coordinates (used for
the locations of the vectors in the quiver plot itself); 'inches'
is position in the figure in inches, with 0,0 at the lower left
corner.
*color*:
overrides face and edge colors from *Q*.
*labelpos* = [ 'N' | 'S' | 'E' | 'W' ]
Position the label above, below, to the right, to the left of the
arrow, respectively.
*labelsep*:
Distance in inches between the arrow and the label. Default is
0.1
*labelcolor*:
defaults to default :class:`~matplotlib.text.Text` color.
*fontproperties*:
A dictionary with keyword arguments accepted by the
:class:`~matplotlib.font_manager.FontProperties` initializer:
*family*, *style*, *variant*, *size*, *weight*
Any additional keyword arguments are used to override vector
properties taken from *Q*.
The positioning of the key depends on *X*, *Y*, *coordinates*, and
*labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position
of the middle of the key arrow. If *labelpos* is 'E', *X*, *Y*
positions the head, and if *labelpos* is 'W', *X*, *Y* positions the
tail; in either of these two cases, *X*, *Y* is somewhere in the
middle of the arrow+label key object.
"""
class QuiverKey(martist.Artist):
""" Labelled arrow for use as a quiver plot scale key.
"""
halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
pivot = {'N': 'mid', 'S': 'mid', 'E': 'tip', 'W': 'tail'}
def __init__(self, Q, X, Y, U, label, **kw):
martist.Artist.__init__(self)
self.Q = Q
self.X = X
self.Y = Y
self.U = U
self.coord = kw.pop('coordinates', 'axes')
self.color = kw.pop('color', None)
self.label = label
self._labelsep_inches = kw.pop('labelsep', 0.1)
self.labelsep = (self._labelsep_inches * Q.ax.figure.dpi)
def on_dpi_change(fig):
self.labelsep = (self._labelsep_inches * fig.dpi)
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
Q.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
self.labelpos = kw.pop('labelpos', 'N')
self.labelcolor = kw.pop('labelcolor', None)
self.fontproperties = kw.pop('fontproperties', dict())
self.kw = kw
_fp = self.fontproperties
#boxprops = dict(facecolor='red')
self.text = mtext.Text(text=label, # bbox=boxprops,
horizontalalignment=self.halign[self.labelpos],
verticalalignment=self.valign[self.labelpos],
fontproperties=font_manager.FontProperties(**_fp))
if self.labelcolor is not None:
self.text.set_color(self.labelcolor)
self._initialized = False
self.zorder = Q.zorder + 0.1
__init__.__doc__ = _quiverkey_doc
def _init(self):
if True: ##not self._initialized:
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
self.verts = self.Q._make_verts(np.array([self.U]),
np.zeros((1,)))
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = collections.PolyCollection(self.verts,
offsets=[(self.X,self.Y)],
transOffset=self.get_transform(),
**kw)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self._initialized = True
def _text_x(self, x):
if self.labelpos == 'E':
return x + self.labelsep
elif self.labelpos == 'W':
return x - self.labelsep
else:
return x
def _text_y(self, y):
if self.labelpos == 'N':
return y + self.labelsep
elif self.labelpos == 'S':
return y - self.labelsep
else:
return y
def draw(self, renderer):
self._init()
self.vector.draw(renderer)
x, y = self.get_transform().transform_point((self.X, self.Y))
self.text.set_x(self._text_x(x))
self.text.set_y(self._text_y(y))
self.text.draw(renderer)
def _set_transform(self):
if self.coord == 'data':
self.set_transform(self.Q.ax.transData)
elif self.coord == 'axes':
self.set_transform(self.Q.ax.transAxes)
elif self.coord == 'figure':
self.set_transform(self.Q.ax.figure.transFigure)
elif self.coord == 'inches':
self.set_transform(self.Q.ax.figure.dpi_scale_trans)
else:
raise ValueError('unrecognized coordinates')
def set_figure(self, fig):
martist.Artist.set_figure(self, fig)
self.text.set_figure(fig)
def contains(self, mouseevent):
# Maybe the dictionary should allow one to
# distinguish between a text hit and a vector hit.
if (self.text.contains(mouseevent)[0]
or self.vector.contains(mouseevent)[0]):
return True, {}
return False, {}
quiverkey_doc = _quiverkey_doc
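# Illustrative usage sketch (pyplot-style; assumes a standard matplotlib
# install where plt.quiver/plt.quiverkey delegate to the Quiver and
# QuiverKey classes in this module):
# >>> import numpy as np
# >>> import matplotlib.pyplot as plt
# >>> X, Y = np.meshgrid(np.arange(0, 2 * np.pi, .2),
# ...                    np.arange(0, 2 * np.pi, .2))
# >>> U, V = np.cos(X), np.sin(Y)
# >>> Q = plt.quiver(X, Y, U, V, units='width')
# >>> qk = plt.quiverkey(Q, 0.9, 0.95, 2, '2 m/s',
# ...                    labelpos='E', coordinates='figure')
# >>> plt.show()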
class Quiver(collections.PolyCollection):
"""
Specialized PolyCollection for arrows.
The only API method is set_UVC(), which can be used
to change the size, orientation, and color of the
arrows; their locations are fixed when the class is
instantiated. Possibly this method will be useful
in animations.
Much of the work in this class is done in the draw()
method so that as much information as possible is available
about the plot. In subsequent draw() calls, recalculation
is limited to things that might have changed, so there
should be no performance penalty from putting the calculations
in the draw() method.
"""
def __init__(self, ax, *args, **kw):
self.ax = ax
X, Y, U, V, C = self._parse_args(*args)
self.X = X
self.Y = Y
self.XY = np.hstack((X[:,np.newaxis], Y[:,np.newaxis]))
self.N = len(X)
self.scale = kw.pop('scale', None)
self.headwidth = kw.pop('headwidth', 3)
self.headlength = float(kw.pop('headlength', 5))
self.headaxislength = kw.pop('headaxislength', 4.5)
self.minshaft = kw.pop('minshaft', 1)
self.minlength = kw.pop('minlength', 1)
self.units = kw.pop('units', 'width')
self.angles = kw.pop('angles', 'uv')
self.width = kw.pop('width', None)
self.color = kw.pop('color', 'k')
self.pivot = kw.pop('pivot', 'tail')
kw.setdefault('facecolors', self.color)
kw.setdefault('linewidths', (0,))
collections.PolyCollection.__init__(self, [], offsets=self.XY,
transOffset=ax.transData,
closed=False,
**kw)
self.polykw = kw
self.set_UVC(U, V, C)
self._initialized = False
self.keyvec = None
self.keytext = None
def on_dpi_change(fig):
self._new_UV = True # vertices depend on width, span
# which in turn depend on dpi
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
self.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _quiver_doc
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def _init(self):
"""initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: ##not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point(
(ax.bbox.width, ax.bbox.height))
self.span = sx
sn = max(8, min(25, math.sqrt(self.N)))
if self.width is None:
self.width = 0.06 * self.span / sn
def draw(self, renderer):
self._init()
if self._new_UV or self.angles == 'xy':
verts = self._make_verts(self.U, self.V)
self.set_verts(verts, closed=False)
self._new_UV = False
collections.PolyCollection.draw(self, renderer)
def set_UVC(self, U, V, C=None):
self.U = U.ravel()
self.V = V.ravel()
if C is not None:
self.set_array(C.ravel())
self._new_UV = True
def _set_transform(self):
ax = self.ax
if self.units in ('x', 'y'):
if self.units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
dx = dx1/dx0
else:
if self.units == 'width':
dx = ax.bbox.width
elif self.units == 'height':
dx = ax.bbox.height
elif self.units == 'dots':
dx = 1.0
elif self.units == 'inches':
dx = ax.figure.dpi
else:
raise ValueError('unrecognized units')
trans = transforms.Affine2D().scale(dx)
self.set_transform(trans)
return trans
def _angles(self, U, V, eps=0.001):
xy = self.ax.transData.transform(self.XY)
uv = ma.hstack((U[:,np.newaxis], V[:,np.newaxis])).filled(0)
xyp = self.ax.transData.transform(self.XY + eps * uv)
dxy = xyp - xy
ang = ma.arctan2(dxy[:,1], dxy[:,0])
return ang
def _make_verts(self, U, V):
uv = ma.asarray(U+V*1j)
a = ma.absolute(uv)
if self.scale is None:
sn = max(10, math.sqrt(self.N))
scale = 1.8 * a.mean() * sn / self.span # crude auto-scaling
self.scale = scale
length = a/(self.scale*self.width)
X, Y = self._h_arrows(length)
if self.angles == 'xy':
theta = self._angles(U, V).filled(0)[:,np.newaxis]
elif self.angles == 'uv':
theta = np.angle(ma.asarray(uv[..., np.newaxis]).filled(0))
else:
theta = ma.asarray(self.angles*np.pi/180.0).filled(0)
xy = (X+Y*1j) * np.exp(1j*theta)*self.width
xy = xy[:,:,np.newaxis]
XY = ma.concatenate((xy.real, xy.imag), axis=2)
return XY
def _h_arrows(self, length):
""" length is in arrow width units """
# It might be possible to streamline the code
# and speed it up a bit by using complex (x,y)
# instead of separate arrays; but any gain would be slight.
minsh = self.minshaft * self.headlength
N = len(length)
length = length.reshape(N, 1)
# x, y: normal horizontal arrow
x = np.array([0, -self.headaxislength,
-self.headlength, 0], np.float64)
x = x + np.array([0,1,1,1]) * length
y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
y = np.repeat(y[np.newaxis,:], N, axis=0)
# x0, y0: arrow without shaft, for short vectors
x0 = np.array([0, minsh-self.headaxislength,
minsh-self.headlength, minsh], np.float64)
y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
ii = [0,1,2,3,2,1,0]
X = x.take(ii, 1)
Y = y.take(ii, 1)
Y[:, 3:] *= -1
X0 = x0.take(ii)
Y0 = y0.take(ii)
Y0[3:] *= -1
shrink = length/minsh
X0 = shrink * X0[np.newaxis,:]
Y0 = shrink * Y0[np.newaxis,:]
short = np.repeat(length < minsh, 7, axis=1)
#print 'short', length < minsh
# Now select X0, Y0 if short, otherwise X, Y
X = ma.where(short, X0, X)
Y = ma.where(short, Y0, Y)
if self.pivot[:3] == 'mid':
X -= 0.5 * X[:,3, np.newaxis]
elif self.pivot[:3] == 'tip':
X = X - X[:,3, np.newaxis] #numpy bug? using -= does not
# work here unless we multiply
# by a float first, as with 'mid'.
tooshort = length < self.minlength
if tooshort.any():
# Use a heptagonal dot:
th = np.arange(0,7,1, np.float64) * (np.pi/3.0)
x1 = np.cos(th) * self.minlength * 0.5
y1 = np.sin(th) * self.minlength * 0.5
X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
tooshort = ma.repeat(tooshort, 7, 1)
X = ma.where(tooshort, X1, X)
Y = ma.where(tooshort, Y1, Y)
return X, Y
quiver_doc = _quiver_doc
_barbs_doc = """
Plot a 2-D field of barbs.
call signatures::
barb(U, V, **kw)
barb(U, V, C, **kw)
barb(X, Y, U, V, **kw)
barb(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the barb locations
(default is head of barb; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the barb shaft
*C*:
an optional array used to map colors to the barbs
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*length*:
Length of the barb in points; the other parts of the barb
are scaled against this.
Default is 9
*pivot*: [ 'tip' | 'middle' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*. Default is 'tip'
*barbcolor*: [ color | color sequence ]
        Specifies the color of all parts of the barb except any flags. This
        parameter is analogous to the *edgecolor* parameter for polygons,
which can be used instead. However this parameter will override
facecolor.
*flagcolor*: [ color | color sequence ]
Specifies the color of any flags on the barb. This parameter is
        analogous to the *facecolor* parameter for polygons, which can be
used instead. However this parameter will override facecolor. If
this is not set (and *C* has not either) then *flagcolor* will be
set to match *barbcolor* so that the barb has a uniform color. If
*C* has been set, *flagcolor* has no effect.
*sizes*:
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or
full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
*fill_empty*:
A flag on whether the empty barbs (circles) that are drawn should
be filled with the flag color. If they are not filled, they will
be drawn such that no color is applied to the center. Default is
False
*rounding*:
A flag to indicate whether the vector magnitude should be rounded
when allocating barb components. If True, the magnitude is
rounded to the nearest multiple of the half-barb increment. If
False, the magnitude is simply truncated to the next lowest
multiple. Default is True
*barb_increments*:
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
*flip_barb*:
Either a single boolean flag or an array of booleans. Single
boolean indicates whether the lines and flags should point
opposite to normal for all barbs. An array (which should be the
same size as the other data arrays) indicates whether to flip for
each individual barb. Normal behavior is for the barbs and lines
to point right (comes from wind barbs having these features point
towards low pressure in the Northern Hemisphere.) Default is
False
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
    lines or a triangle for various increments in magnitude, as shown
schematically below::
: /\ \\
: / \ \\
: / \ \ \\
: / \ \ \\
: ------------------------------
.. note the double \\ at the end of each line to make the figure
.. render correctly
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line. There
is only, of course, ever at most 1 half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
linewidths and edgecolors can be used to customize the barb.
Additional :class:`~matplotlib.collections.PolyCollection` keyword
arguments:
%(PolyCollection)s
""" % martist.kwdocd
class Barbs(collections.PolyCollection):
'''
Specialized PolyCollection for barbs.
The only API method is :meth:`set_UVC`, which can be used to
change the size, orientation, and color of the arrows. Locations
are changed using the :meth:`set_offsets` collection method.
Possibly this method will be useful in animations.
There is one internal function :meth:`_find_tails` which finds
exactly what should be put on the barb given the vector magnitude.
From there :meth:`_make_barbs` is used to find the vertices of the
polygon to represent the barb based on this information.
'''
#This may be an abuse of polygons here to render what is essentially maybe
#1 triangle and a series of lines. It works fine as far as I can tell
#however.
def __init__(self, ax, *args, **kw):
self._pivot = kw.pop('pivot', 'tip')
self._length = kw.pop('length', 7)
barbcolor = kw.pop('barbcolor', None)
flagcolor = kw.pop('flagcolor', None)
self.sizes = kw.pop('sizes', dict())
self.fill_empty = kw.pop('fill_empty', False)
self.barb_increments = kw.pop('barb_increments', dict())
self.rounding = kw.pop('rounding', True)
self.flip = kw.pop('flip_barb', False)
        #Flagcolor and barbcolor provide convenience parameters for setting
#the facecolor and edgecolor, respectively, of the barb polygon. We
#also work here to make the flag the same color as the rest of the barb
#by default
if None in (barbcolor, flagcolor):
kw['edgecolors'] = 'face'
if flagcolor:
kw['facecolors'] = flagcolor
elif barbcolor:
kw['facecolors'] = barbcolor
else:
#Set to facecolor passed in or default to black
kw.setdefault('facecolors', 'k')
else:
kw['edgecolors'] = barbcolor
kw['facecolors'] = flagcolor
#Parse out the data arrays from the various configurations supported
x, y, u, v, c = self._parse_args(*args)
self.x = x
self.y = y
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
#Make a collection
barb_size = self._length**2 / 4 #Empirically determined
collections.PolyCollection.__init__(self, [], (barb_size,), offsets=xy,
transOffset=ax.transData, **kw)
self.set_transform(transforms.IdentityTransform())
self.set_UVC(u, v, c)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _barbs_doc
def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
'''
Find how many of each of the tail pieces is necessary. Flag
specifies the increment for a flag, barb for a full barb, and half for
half a barb. Mag should be the magnitude of a vector (ie. >= 0).
This returns a tuple of:
(*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
*half_flag* is a boolean whether half of a barb is needed,
since there should only ever be one half on a given
        barb. *empty_flag* is an array of flags to easily tell if
        a barb is empty (too low to plot any barbs/flags).
'''
#If rounding, round to the nearest multiple of half, the smallest
#increment
if rounding:
mag = half * (mag / half + 0.5).astype(np.int)
num_flags = np.floor(mag / flag).astype(np.int)
mag = np.mod(mag, flag)
num_barb = np.floor(mag / full).astype(np.int)
mag = np.mod(mag, full)
half_flag = mag >= half
empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
return num_flags, num_barb, half_flag, empty_flag
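    # Worked example (illustrative): with the default increments
    # (half=5, full=10, flag=50) a magnitude of 65 rounds to 65 and is
    # decomposed as num_flags=1 (50), num_barb=1 (10) and half_flag=True (5),
    # matching the schematic barb in the class docstring; a magnitude of 2
    # rounds to 0 and sets empty_flag=True.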
def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
pivot, sizes, fill_empty, flip):
'''
This function actually creates the wind barbs. *u* and *v*
are components of the vector in the *x* and *y* directions,
respectively.
        *nflags*, *nbarbs*, *half_barb* and *empty_flag* are,
        respectively, the number of flags, number of barbs, flag for
        half a barb, and flag for an empty barb, ostensibly obtained
        from :meth:`_find_tails`.
*length* is the length of the barb staff in points.
        *pivot* specifies the point on the barb around which the
        entire barb should be rotated.  Right now, valid options are
        'tip' and 'middle'.
*sizes* is a dictionary of coefficients specifying the ratio
of a given feature to the length of the barb. These features
include:
            - *spacing*: space between features (flags, full/half
              barbs)
            - *height*: distance from the shaft to the top of a flag or full
              barb
            - *width*: width of a flag, twice the width of a full barb
            - *emptybarb*: radius of the circle used for low
              magnitudes
*fill_empty* specifies whether the circle representing an
empty barb should be filled or not (this changes the drawing
of the polygon).
        *flip* is a flag indicating whether the features should be flipped to
        the other side of the barb (useful for winds in the southern
        hemisphere).
        This function returns a list of arrays of vertices, defining a polygon
        for each of the wind barbs.  These polygons have been rotated to
        properly align with the vector direction.
'''
#These control the spacing and size of barb elements relative to the
#length of the shaft
spacing = length * sizes.get('spacing', 0.125)
full_height = length * sizes.get('height', 0.4)
full_width = length * sizes.get('width', 0.25)
empty_rad = length * sizes.get('emptybarb', 0.15)
#Controls y point where to pivot the barb.
pivot_points = dict(tip=0.0, middle=-length/2.)
#Check for flip
if flip: full_height = -full_height
endx = 0.0
endy = pivot_points[pivot.lower()]
#Get the appropriate angle for the vector components. The offset is due
#to the way the barb is initially drawn, going down the y-axis. This
        #makes sense in a meteorological mode of thinking, where 0 degrees
        #corresponds to north (traditionally the y-axis).
angles = -(ma.arctan2(v, u) + np.pi/2)
#Used for low magnitude. We just get the vertices, so if we make it
#out here, it can be reused. The center set here should put the
#center of the circle at the location(offset), rather than at the
#same point as the barb pivot; this seems more sensible.
circ = CirclePolygon((0,0), radius=empty_rad).get_verts()
if fill_empty:
empty_barb = circ
else:
#If we don't want the empty one filled, we make a degenerate polygon
#that wraps back over itself
empty_barb = np.concatenate((circ, circ[::-1]))
barb_list = []
for index, angle in np.ndenumerate(angles):
#If the vector magnitude is too weak to draw anything, plot an
#empty circle instead
if empty_flag[index]:
#We can skip the transform since the circle has no preferred
#orientation
barb_list.append(empty_barb)
continue
poly_verts = [(endx, endy)]
offset = length
#Add vertices for each flag
for i in range(nflags[index]):
                #The spacing that works for the barbs is a little too much for
#the flags, but this only occurs when we have more than 1 flag.
if offset != length: offset += spacing / 2.
poly_verts.extend([[endx, endy + offset],
[endx + full_height, endy - full_width/2 + offset],
[endx, endy - full_width + offset]])
offset -= full_width + spacing
            #Add vertices for each barb.  These really are lines, but it works
            #great to add 3 vertices that basically pull the polygon out and
            #back down the line
for i in range(nbarbs[index]):
poly_verts.extend([(endx, endy + offset),
(endx + full_height, endy + offset + full_width/2),
(endx, endy + offset)])
offset -= spacing
#Add the vertices for half a barb, if needed
if half_barb[index]:
#If the half barb is the first on the staff, traditionally it is
#offset from the end to make it easy to distinguish from a barb
#with a full one
if offset == length:
poly_verts.append((endx, endy + offset))
offset -= 1.5 * spacing
poly_verts.extend([(endx, endy + offset),
(endx + full_height/2, endy + offset + full_width/4),
(endx, endy + offset)])
            #Rotate the barb according to the angle. Making the barb first and then
#rotating it made the math for drawing the barb really easy. Also,
#the transform framework makes doing the rotation simple.
poly_verts = transforms.Affine2D().rotate(-angle).transform(
poly_verts)
barb_list.append(poly_verts)
return barb_list
#Taken shamelessly from Quiver
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def set_UVC(self, U, V, C=None):
self.u = ma.asarray(U).ravel()
self.v = ma.asarray(V).ravel()
if C is not None:
c = ma.asarray(C).ravel()
x,y,u,v,c = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v, c)
else:
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
magnitude = np.sqrt(u*u + v*v)
flags, barbs, halves, empty = self._find_tails(magnitude,
self.rounding, **self.barb_increments)
#Get the vertices for each of the barbs
plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
self._length, self._pivot, self.sizes, self.fill_empty, self.flip)
self.set_verts(plot_barbs)
#Set the color array
if C is not None:
self.set_array(c)
#Update the offsets in case the masked data changed
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
self._offsets = xy
def set_offsets(self, xy):
'''
        Set the offsets for the barb polygons.  This saves the offsets passed
        in and sets a masked version as appropriate for the existing U/V
        data.  *offsets* should be a sequence.
ACCEPTS: sequence of pairs of floats
'''
self.x = xy[:,0]
self.y = xy[:,1]
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(), self.u,
self.v)
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
collections.PolyCollection.set_offsets(self, xy)
set_offsets.__doc__ = collections.PolyCollection.set_offsets.__doc__
barbs_doc = _barbs_doc
| agpl-3.0 |
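# The _find_tails logic above reduces to integer division of the wind magnitude
# by the half/full/flag increments.  A minimal standalone sketch of that
# decomposition, assuming the default 5/10/50 increments; illustrative only,
# not matplotlib's public API.
import numpy as np

def decompose_barb(mag, half=5, full=10, flag=50, rounding=True):
    """Return (n_flags, n_full, has_half, is_empty) for vector magnitudes."""
    mag = np.asarray(mag, dtype=float)
    if rounding:
        # same "round to the nearest multiple of half" trick as _find_tails
        mag = half * np.floor(mag / half + 0.5)
    n_flags = np.floor(mag / flag).astype(int)
    mag = np.mod(mag, flag)
    n_full = np.floor(mag / full).astype(int)
    mag = np.mod(mag, full)
    has_half = mag >= half
    is_empty = ~(has_half | (n_flags > 0) | (n_full > 0))
    return n_flags, n_full, has_half, is_empty

# e.g. 2 -> empty circle, 15 -> one full barb plus a half, 65 -> flag + full + half
print(decompose_barb([2, 15, 65]))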
katyhuff/moose | gui/utils/Plotter.py | 33 | 6221 | #!/usr/bin/python
import sys, os, random
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
import numpy, csv
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class PlotWidget(FigureCanvas):
"""This is the canvas Widget. It allows for MPL plot embedding """
def __init__(self, parent=None, width=9.85, height=5 , dpi=50):
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.fig.add_subplot(111)
# We want the axes cleared every time plot() is called
self.axes.hold(False)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed)
FigureCanvas.updateGeometry(self)
class MPLPlotter(QtGui.QWidget):
"""This is a widget that inherites from the plotWidget class that is used to update the plot with PP data"""
def __init__(self, plotData, plotName, parent = None):
QtGui.QWidget.__init__(self, parent)
self.plotData = plotData
self.plotName = plotName
self.canvas = PlotWidget()
self.plotTitle = plotName + ' Postprocessor'
self.getPlotColor()
self.setPlotData(self.plotData, self.plotName)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addWidget(self.canvas)
self.setLayout(self.vbox)
# set button context menu policy
self.canvas.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.connect(self.canvas, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menu)
# create color menu
self.colorMenu = QtGui.QMenu('Plot Color', self)
royalBlueLine = QtGui.QAction('Blue',self)
royalBlueLine.triggered.connect(self.changeRoyalBlue)
orchidLine = QtGui.QAction('Magenta',self)
orchidLine.triggered.connect(self.changeOrchid)
tomatoLine = QtGui.QAction('Red',self)
tomatoLine.triggered.connect(self.changeTomato)
goldLine = QtGui.QAction('Yellow',self)
goldLine.triggered.connect(self.changeGold)
limeGreenLine = QtGui.QAction('Green',self)
limeGreenLine.triggered.connect(self.changeLimeGreen)
turquoiseLine = QtGui.QAction('Cyan',self)
turquoiseLine.triggered.connect(self.changeTurquoise)
blackLine = QtGui.QAction('Black',self)
blackLine.triggered.connect(self.changeBlack)
self.colorMenu.addAction(royalBlueLine)
self.colorMenu.addAction(orchidLine)
self.colorMenu.addAction(tomatoLine)
self.colorMenu.addAction(goldLine)
self.colorMenu.addAction(limeGreenLine)
self.colorMenu.addAction(turquoiseLine)
self.colorMenu.addAction(blackLine)
# create context menu
saveAction = QtGui.QAction('Save Plot', self)
saveAction.triggered.connect(self.savePlot)
closeAction = QtGui.QAction('Close Plot', self)
closeAction.triggered.connect(self.closePlot)
self.popMenu = QtGui.QMenu(self)
self.popMenu.addAction(saveAction)
self.popMenu.addSeparator()
self.popMenu.addMenu(self.colorMenu)
self.popMenu.addSeparator()
self.popMenu.addAction(closeAction)
def setPlotData(self, plotData, plotName):
self.plotData = plotData
self.plotName = plotName
self.xData = self.plotData[0]
self.yData = self.plotData[1]
# MPL plots
self.canvas.axes.plot(self.xData, self.yData, self.plotColor, linewidth = 2.5)
self.canvas.axes.set_xlabel('time')
self.canvas.axes.set_ylabel(self.plotName)
self.canvas.axes.set_title(self.plotTitle)
self.canvas.draw()
def on_context_menu(self, point):
# show context menu
self.popMenu.exec_(self.canvas.mapToGlobal(point))
def savePlot(self):
file_name = QtGui.QFileDialog.getSaveFileName(self, 'Save file', self.plotTitle, "Images (*.pdf)")
if isinstance(file_name, QtCore.QString):
file_name = str(file_name)
if not isinstance(file_name, basestring): # This happens when using pyside
file_name = file_name[0]
if file_name != '':
self.canvas.print_figure(unicode(file_name), dpi = 100)
def closePlot(self):
self.close()
def changeRoyalBlue(self):
self.plotColor = "RoyalBlue"
self.setPlotData(self.plotData,self.plotName)
def changeOrchid(self):
self.plotColor = "Magenta"
self.setPlotData(self.plotData,self.plotName)
def changeTomato(self):
self.plotColor = "Tomato"
self.setPlotData(self.plotData,self.plotName)
def changeGold(self):
self.plotColor = "Gold"
self.setPlotData(self.plotData,self.plotName)
def changeLimeGreen(self):
self.plotColor = "LimeGreen"
self.setPlotData(self.plotData,self.plotName)
def changeTurquoise(self):
self.plotColor = "DarkTurquoise"
self.setPlotData(self.plotData,self.plotName)
def changeBlack(self):
self.plotColor = "Black"
self.setPlotData(self.plotData,self.plotName)
def getPlotColor(self):
if (self.plotName[0] in ('a','A','f','F','k','K','p','P','u','U','z','Z')):
self.plotColor = "LimeGreen"
elif (self.plotName[0] in ('b','B','g','G','l','L','q','Q','v','V')):
self.plotColor = "DarkTurquoise"
elif (self.plotName[0] in ('c','C','h','H','m','M','r','R','w','W')):
self.plotColor = "RoyalBlue"
elif (self.plotName[0] in ('d','D','i','I','n','N','s','S','x','X')):
self.plotColor = "Magenta"
elif (self.plotName[0] in ('e','E','j','J','o','O','t','T','y','Y')):
self.plotColor = "Tomato"
else:
self.plotColor = "Gold"
| lgpl-2.1 |
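# A minimal sketch of the same Matplotlib-in-Qt embedding pattern that
# PlotWidget uses above (assumes PyQt4 and the qt4agg backend are available).
import sys
from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure

app = QtGui.QApplication(sys.argv)
fig = Figure(figsize=(5, 3), dpi=80)
axes = fig.add_subplot(111)
axes.plot([0, 1, 2, 3], [0, 1, 4, 9])
canvas = FigureCanvas(fig)        # the canvas is itself a QWidget
canvas.show()
sys.exit(app.exec_())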
siutanwong/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Invert the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
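# A small shape-only sketch of the FeatureAgglomeration round trip used above:
# transform() replaces each feature cluster by its mean, and inverse_transform()
# broadcasts the reduced coefficients back onto the original feature grid.
import numpy as np
from sklearn.cluster import FeatureAgglomeration

X = np.random.RandomState(0).randn(50, 100)         # 50 samples, 100 features
agglo = FeatureAgglomeration(n_clusters=10).fit(X)
X_reduced = agglo.transform(X)                       # shape (50, 10)
X_restored = agglo.inverse_transform(X_reduced)      # shape (50, 100)
print(X_reduced.shape, X_restored.shape)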
FelixPM/Learning-Machine-Learning | sentdex/Mean_Shift_from_scratch.py | 1 | 3932 | """Mean Shift algorithm in python from scratch
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn.datasets.samples_generator import make_blobs
import random
centers = random.randrange(2, 4)
style.use('ggplot')
x, y = make_blobs(n_samples=30, centers=centers, n_features=2)
# x = np.array([[1, 2],
# [1.5, 1.8],
# [5, 8],
# [8, 8],
# [1, 0.6],
# [8, 2],
# [10, 2],
# [9, 3],
# [9, 11]])
# plt.scatter(x[:, 0], x[:, 1], s=150)
# plt.show()
colors = 10*['g', 'r', 'c', 'b', 'k']
class MeanShift:
def __init__(self, radius=None, radius_norm_step=100):
self.radius = radius
self.radius_norm_step = radius_norm_step
self.centroids = {}
self.classifications = {}
def fit(self, data):
if self.radius is None:
all_data_centroid = np.average(data, axis=0)
all_data_norm = np.linalg.norm(all_data_centroid)
self.radius = all_data_norm / self.radius_norm_step
centroids = {}
for i, j in enumerate(data):
centroids[i] = j
weights = [i for i in range(self.radius_norm_step)][::-1]
while True:
new_centroids = []
for i in centroids:
in_bandwidth = []
centroid = centroids[i]
for featureset in data:
distance = np.linalg.norm(featureset-centroid)
if distance == 0:
distance = 0.000000001
weight_index = int(distance/self.radius)
if weight_index > self.radius_norm_step-1:
weight_index = self.radius_norm_step-1
to_add = (weights[weight_index]**2)*[featureset]
in_bandwidth += to_add
new_centroid = np.average(in_bandwidth, axis=0)
new_centroids.append(tuple(new_centroid))
uniques = sorted(list(set(new_centroids)))
to_pop = []
for i in uniques:
for ii in uniques:
if i == ii:
pass
elif np.linalg.norm(np.array(i)-np.array(ii)) <= self.radius:
to_pop.append(ii)
break
for i in to_pop:
if i in uniques:
uniques.remove(i)
prev_centroids = dict(centroids)
centroids = {}
for i, j in enumerate(uniques):
centroids[i] = np.array(j)
optimized = True
for i in centroids:
if not np.array_equal(centroids[i], prev_centroids[i]):
optimized = False
if optimized:
break
self.centroids = centroids
self.classifications = {}
for i in range(len(self.centroids)):
self.classifications[i] = []
for featureset in data:
distances = [np.linalg.norm(featureset-self.centroids[centroid]) for centroid in self.centroids]
classification = (distances.index(min(distances)))
self.classifications[classification].append(featureset)
def predict(self, data):
distances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]
classification = (distances.index(min(distances)))
return classification
clf = MeanShift()
clf.fit(x)
centroids1 = clf.centroids
for classification1 in clf.classifications:
color = colors[classification1]
for featureset1 in clf.classifications[classification1]:
plt.scatter(featureset1[0], featureset1[1], marker='x', color=color, s=150, linewidths=5)
for c in centroids1:
plt.scatter(centroids1[c][0], centroids1[c][1], color='k', marker='*', s=150)
plt.show()
| mit |
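# For comparison, scikit-learn's built-in estimator gives the same kind of
# result; a short sketch (its flat-kernel bandwidth estimate differs from the
# weighted, shrinking-radius scheme implemented above).
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=30, centers=3, n_features=2, random_state=0)
bandwidth = estimate_bandwidth(X, quantile=0.3)
ms = MeanShift(bandwidth=bandwidth).fit(X)
print(ms.cluster_centers_)
print(ms.labels_)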
IntelLabs/hpat | examples/dataframe/rolling/dataframe_rolling_var.py | 1 | 1871 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def df_rolling_var():
df = pd.DataFrame({'A': [4, 3, 5, 2, 6], 'B': [-4, -3, -5, -2, -6]})
out_df = df.rolling(3).var()
# Expect DataFrame of
# {'A': [NaN, NaN, 1.000000, 2.333333, 4.333333],
# 'B': [NaN, NaN, 1.000000, 2.333333, 4.333333]}
return out_df
print(df_rolling_var())
| bsd-2-clause |
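# The same computation in plain pandas, without the @njit compilation -- a
# quick sanity check of the expected output shown in the comment above.
import pandas as pd

df = pd.DataFrame({'A': [4, 3, 5, 2, 6], 'B': [-4, -3, -5, -2, -6]})
print(df.rolling(3).var())
# The first two rows are NaN because the window needs 3 observations;
# e.g. the sample variance of [4, 3, 5] is 1.0 (pandas uses ddof=1).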
rahul-c1/scikit-learn | examples/exercises/plot_cv_digits.py | 20 | 1207 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
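# Note: sklearn.cross_validation was removed in scikit-learn 0.20.  Roughly the
# same scoring loop with the current model_selection API (sketch only):
import numpy as np
from sklearn import datasets, svm
from sklearn.model_selection import cross_val_score

X, y = datasets.load_digits(return_X_y=True)
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = [cross_val_score(svc.set_params(C=C), X, y, n_jobs=1).mean() for C in C_s]
print(scores)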
supriyagarg/pydatalab | datalab/utils/commands/_utils.py | 4 | 25607 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from builtins import str
from past.builtins import basestring
try:
import IPython
import IPython.core.display
except ImportError:
raise Exception('This module can only be loaded in ipython.')
import json
import pandas
try:
# Pandas profiling is not needed for build/test but will be in the container.
import pandas_profiling
except ImportError:
pass
import sys
import types
import yaml
import datalab.data
import datalab.bigquery
import datalab.storage
import datalab.utils
import google.datalab.bigquery
import google.datalab.utils
from . import _html
def notebook_environment():
""" Get the IPython user namespace. """
ipy = IPython.get_ipython()
return ipy.user_ns
def get_notebook_item(name):
""" Get an item from the IPython environment. """
env = notebook_environment()
return datalab.utils.get_item(env, name)
def render_list(data):
return IPython.core.display.HTML(_html.HtmlBuilder.render_list(data))
def render_dictionary(data, headers=None):
""" Return a dictionary list formatted as a HTML table.
Args:
data: the dictionary list
headers: the keys in the dictionary to use as table columns, in order.
"""
return IPython.core.display.HTML(_html.HtmlBuilder.render_table(data, headers))
def render_text(text, preformatted=False):
""" Return text formatted as a HTML
Args:
text: the text to render
preformatted: whether the text should be rendered as preformatted
"""
return IPython.core.display.HTML(_html.HtmlBuilder.render_text(text, preformatted))
def get_field_list(fields, schema):
""" Convert a field list spec into a real list of field names.
For tables, we return only the top-level non-RECORD fields as Google charts
can't handle nested data.
"""
# If the fields weren't supplied get them from the schema.
if isinstance(fields, list):
return fields
if isinstance(fields, basestring) and fields != '*':
return fields.split(',')
if not schema:
return []
return [f['name'] for f in schema._bq_schema if f['type'] != 'RECORD']
def _get_cols(fields, schema):
""" Get column metadata for Google Charts based on field list and schema. """
typemap = {
'STRING': 'string',
'INT64': 'number',
'INTEGER': 'number',
'FLOAT': 'number',
'FLOAT64': 'number',
'BOOL': 'boolean',
'BOOLEAN': 'boolean',
'DATE': 'date',
'TIME': 'timeofday',
'DATETIME': 'datetime',
'TIMESTAMP': 'datetime'
}
cols = []
for col in fields:
if schema:
f = schema[col]
t = 'string' if f.mode == 'REPEATED' else typemap.get(f.data_type, 'string')
cols.append({'id': f.name, 'label': f.name, 'type': t})
else:
# This will only happen if we had no rows to infer a schema from, so the type
# is not really important, except that GCharts will choke if we pass such a schema
# to a chart if it is string x string so we default to number.
cols.append({'id': col, 'label': col, 'type': 'number'})
return cols
def _get_data_from_empty_list(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles empty lists. """
fields = get_field_list(fields, schema)
return {'cols': _get_cols(fields, schema), 'rows': []}, 0
def _get_data_from_list_of_dicts(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of dicts. """
if schema is None:
schema = datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
rows = [{'c': [{'v': row[c]} if c in row else {} for c in fields]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source)
def _get_data_from_list_of_lists(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of lists. """
if schema is None:
schema = datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
cols = [schema.find(name) for name in fields]
rows = [{'c': [{'v': row[i]} for i in cols]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source)
def _get_data_from_dataframe(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles Pandas DataFrames. """
if schema is None:
schema = datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
rows = []
if count < 0:
count = len(source.index)
df_slice = source.reset_index(drop=True)[first_row:first_row + count]
for index, data_frame_row in df_slice.iterrows():
row = data_frame_row.to_dict()
for key in list(row.keys()):
val = row[key]
if isinstance(val, pandas.Timestamp):
row[key] = val.to_pydatetime()
rows.append({'c': [{'v': row[c]} if c in row else {} for c in fields]})
cols = _get_cols(fields, schema)
return {'cols': cols, 'rows': rows}, len(source)
def _get_data_from_table(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles BQ Tables. """
if not source.exists():
return _get_data_from_empty_list(source, fields, first_row, count)
if schema is None:
schema = source.schema
fields = get_field_list(fields, schema)
gen = source.range(first_row, count) if count >= 0 else source
rows = [{'c': [{'v': row[c]} if c in row else {} for c in fields]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, source.length
def get_data(source, fields='*', env=None, first_row=0, count=-1, schema=None):
""" A utility function to get a subset of data from a Table, Query, Pandas dataframe or List.
Args:
source: the source of the data. Can be a Table, Pandas DataFrame, List of dictionaries or
lists, or a string, in which case it is expected to be the name of a table in BQ.
fields: a list of fields that we want to return as a list of strings, comma-separated string,
or '*' for all.
env: if the data source is a Query module, this is the set of variable overrides for
parameterizing the Query.
    first_row: the index of the first row to return; default 0. Only used if count is non-negative.
    count: the number of rows to return. If negative (the default), return all rows.
schema: the schema of the data. Optional; if supplied this can be used to help do type-coercion.
Returns:
A tuple consisting of a dictionary and a count; the dictionary has two entries: 'cols'
which is a list of column metadata entries for Google Charts, and 'rows' which is a list of
lists of values. The count is the total number of rows in the source (independent of the
first_row/count parameters).
Raises:
Exception if the request could not be fulfilled.
"""
ipy = IPython.get_ipython()
if env is None:
env = {}
env.update(ipy.user_ns)
if isinstance(source, basestring):
source = datalab.utils.get_item(ipy.user_ns, source, source)
if isinstance(source, basestring):
source = datalab.bigquery.Table(source)
if isinstance(source, types.ModuleType) or isinstance(source, datalab.data.SqlStatement):
source = datalab.bigquery.Query(source, values=env)
if isinstance(source, list):
if len(source) == 0:
return _get_data_from_empty_list(source, fields, first_row, count, schema)
elif isinstance(source[0], dict):
return _get_data_from_list_of_dicts(source, fields, first_row, count, schema)
elif isinstance(source[0], list):
return _get_data_from_list_of_lists(source, fields, first_row, count, schema)
else:
raise Exception("To get tabular data from a list it must contain dictionaries or lists.")
elif isinstance(source, pandas.DataFrame):
return _get_data_from_dataframe(source, fields, first_row, count, schema)
elif (isinstance(source, google.datalab.bigquery.Query) or
isinstance(source, google.datalab.bigquery.Table)):
return google.datalab.utils.commands._utils.get_data(
source, fields, env, first_row, count, schema)
elif isinstance(source, datalab.bigquery.Query):
return _get_data_from_table(source.results(), fields, first_row, count, schema)
elif isinstance(source, datalab.bigquery.Table):
return _get_data_from_table(source, fields, first_row, count, schema)
else:
raise Exception("Cannot chart %s; unsupported object type" % source)
def handle_magic_line(line, cell, parser, namespace=None):
""" Helper function for handling magic command lines given a parser with handlers set. """
args = parser.parse(line, namespace)
if args:
try:
return args.func(vars(args), cell)
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.stderr.flush()
return None
def expand_var(v, env):
""" If v is a variable reference (for example: '$myvar'), replace it using the supplied
env dictionary.
Args:
v: the variable to replace if needed.
env: user supplied dictionary.
Raises:
Exception if v is a variable reference but it is not found in env.
"""
if len(v) == 0:
return v
# Using len() and v[0] instead of startswith makes this Unicode-safe.
if v[0] == '$':
v = v[1:]
if len(v) and v[0] != '$':
if v in env:
v = env[v]
else:
raise Exception('Cannot expand variable $%s' % v)
return v
def replace_vars(config, env):
""" Replace variable references in config using the supplied env dictionary.
Args:
config: the config to parse. Can be a tuple, list or dict.
env: user supplied dictionary.
Raises:
Exception if any variable references are not found in env.
"""
if isinstance(config, dict):
for k, v in list(config.items()):
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
elif isinstance(v, basestring):
config[k] = expand_var(v, env)
elif isinstance(config, list):
for i, v in enumerate(config):
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
elif isinstance(v, basestring):
config[i] = expand_var(v, env)
elif isinstance(config, tuple):
# TODO(gram): figure out how to handle these if the tuple elements are scalar
for v in config:
if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):
replace_vars(v, env)
def parse_config(config, env, as_dict=True):
""" Parse a config from a magic cell body. This could be JSON or YAML. We turn it into
a Python dictionary then recursively replace any variable references using the supplied
env dictionary.
"""
if config is None:
return None
stripped = config.strip()
if len(stripped) == 0:
config = {}
elif stripped[0] == '{':
config = json.loads(config)
else:
    config = yaml.safe_load(config)
if as_dict:
config = dict(config)
# Now we need to walk the config dictionary recursively replacing any '$name' vars.
replace_vars(config, env)
return config
def validate_config(config, required_keys, optional_keys=None):
""" Validate a config dictionary to make sure it includes all required keys
and does not include any unexpected keys.
Args:
config: the config to validate.
required_keys: the names of the keys that the config must have.
optional_keys: the names of the keys that the config can have.
Raises:
Exception if the config is not a dict or invalid.
"""
if optional_keys is None:
optional_keys = []
if not isinstance(config, dict):
raise Exception('config is not dict type')
invalid_keys = set(config) - set(required_keys + optional_keys)
if len(invalid_keys) > 0:
raise Exception('Invalid config with unexpected keys "%s"' % ', '.join(e for e in invalid_keys))
missing_keys = set(required_keys) - set(config)
if len(missing_keys) > 0:
raise Exception('Invalid config with missing keys "%s"' % ', '.join(missing_keys))
def validate_config_must_have(config, required_keys):
""" Validate a config dictionary to make sure it has all of the specified keys
Args:
config: the config to validate.
required_keys: the list of possible keys that config must include.
Raises:
Exception if the config does not have any of them.
"""
missing_keys = set(required_keys) - set(config)
if len(missing_keys) > 0:
raise Exception('Invalid config with missing keys "%s"' % ', '.join(missing_keys))
def validate_config_has_one_of(config, one_of_keys):
""" Validate a config dictionary to make sure it has one and only one
key in one_of_keys.
Args:
config: the config to validate.
one_of_keys: the list of possible keys that config can have one and only one.
Raises:
Exception if the config does not have any of them, or multiple of them.
"""
intersection = set(config).intersection(one_of_keys)
if len(intersection) > 1:
raise Exception('Only one of the values in "%s" is needed' % ', '.join(intersection))
if len(intersection) == 0:
raise Exception('One of the values in "%s" is needed' % ', '.join(one_of_keys))
def validate_config_value(value, possible_values):
""" Validate a config value to make sure it is one of the possible values.
Args:
value: the config value to validate.
possible_values: the possible values the value can be
Raises:
Exception if the value is not one of possible values.
"""
if value not in possible_values:
raise Exception('Invalid config value "%s". Possible values are '
'%s' % (value, ', '.join(e for e in possible_values)))
# For chart and table HTML viewers, we use a list of table names and reference
# instead the indices in the HTML, so as not to include things like projectID, etc,
# in the HTML.
_data_sources = []
def get_data_source_index(name):
if name not in _data_sources:
_data_sources.append(name)
return _data_sources.index(name)
def validate_gcs_path(path, require_object):
""" Check whether a given path is a valid GCS path.
Args:
path: the config to check.
require_object: if True, the path has to be an object path but not bucket path.
Raises:
Exception if the path is invalid
"""
bucket, key = datalab.storage._bucket.parse_name(path)
if bucket is None:
raise Exception('Invalid GCS path "%s"' % path)
if require_object and key is None:
raise Exception('It appears the GCS path "%s" is a bucket path but not an object path' % path)
def parse_control_options(controls, variable_defaults=None):
""" Parse a set of control options.
Args:
controls: The dictionary of control options.
    variable_defaults: If the controls are for a Query with variables, these are the
default variable values defined in the Query module. The options in the controls
parameter can override these but if a variable has no 'value' property then we
fall back to these.
Returns:
- the HTML for the controls.
- the default values for the controls as a dict.
- the list of DIV IDs of the controls.
"""
controls_html = ''
control_defaults = {}
control_ids = []
div_id = _html.Html.next_id()
if variable_defaults is None:
variable_defaults = {}
for varname, control in list(controls.items()):
label = control.get('label', varname)
control_id = div_id + '__' + varname
control_ids.append(control_id)
value = control.get('value', variable_defaults.get(varname, None))
# The user should usually specify the type but we will default to 'textbox' for strings
# and 'set' for lists.
if isinstance(value, basestring):
type = 'textbox'
elif isinstance(value, list):
type = 'set'
else:
type = None
type = control.get('type', type)
if type == 'picker':
choices = control.get('choices', value)
if not isinstance(choices, list) or len(choices) == 0:
raise Exception('picker control must specify a nonempty set of choices')
if value is None:
value = choices[0]
choices_html = ''
for i, choice in enumerate(choices):
choices_html += "<option value=\"%s\" %s>%s</option>" % \
(choice, ("selected=\"selected\"" if choice == value else ''), choice)
control_html = "{label}<select disabled id=\"{id}\">{choices}</select>" \
.format(label=label, id=control_id, choices=choices_html)
elif type == 'set': # Multi-picker; implemented as checkboxes.
# TODO(gram): consider using "name" property of the control to group checkboxes. That
# way we can save the code of constructing and parsing control Ids with sequential
# numbers in it. Multiple checkboxes can share the same name.
choices = control.get('choices', value)
if not isinstance(choices, list) or len(choices) == 0:
raise Exception('set control must specify a nonempty set of choices')
if value is None:
value = choices
choices_html = ''
control_ids[-1] = '%s:%d' % (control_id, len(choices)) # replace ID to include count.
for i, choice in enumerate(choices):
checked = choice in value
choice_id = '%s:%d' % (control_id, i)
        # TODO(gram): we may want a 'Submit/Refresh' button as we may not want to rerun
# query on each checkbox change.
choices_html += """
<div>
<label>
<input type="checkbox" id="{id}" value="{choice}" {checked} disabled>
{choice}
</label>
</div>
""".format(id=choice_id, choice=choice, checked="checked" if checked else '')
control_html = "{label}<div>{choices}</div>".format(label=label, choices=choices_html)
elif type == 'checkbox':
control_html = """
<label>
<input type="checkbox" id="{id}" {checked} disabled>
{label}
</label>
""".format(label=label, id=control_id, checked="checked" if value else '')
elif type == 'slider':
min_ = control.get('min', None)
max_ = control.get('max', None)
if min_ is None or max_ is None:
raise Exception('slider control must specify a min and max value')
if max_ <= min_:
raise Exception('slider control must specify a min value less than max value')
step = control.get('step', 1 if isinstance(min_, int) and isinstance(max_, int)
else (float(max_ - min_) / 10.0))
if value is None:
value = min_
control_html = """
{label}
<input type="text" class="gchart-slider_value" id="{id}_value" value="{value}" disabled/>
<input type="range" class="gchart-slider" id="{id}" min="{min}" max="{max}" step="{step}"
value="{value}" disabled/>
""".format(label=label, id=control_id, value=value, min=min_, max=max_, step=step)
elif type == 'textbox':
if value is None:
value = ''
control_html = "{label}<input type=\"text\" value=\"{value}\" id=\"{id}\" disabled/>" \
.format(label=label, value=value, id=control_id)
else:
raise Exception(
'Unknown control type %s (expected picker, slider, checkbox, textbox or set)' % type)
control_defaults[varname] = value
controls_html += "<div class=\"gchart-control\">{control}</div>\n" \
.format(control=control_html)
controls_html = "<div class=\"gchart-controls\">{controls}</div>".format(controls=controls_html)
return controls_html, control_defaults, control_ids
def chart_html(driver_name, chart_type, source, chart_options=None, fields='*', refresh_interval=0,
refresh_data=None, control_defaults=None, control_ids=None, schema=None):
""" Return HTML for a chart.
Args:
driver_name: the name of the chart driver. Currently we support 'plotly' or 'gcharts'.
chart_type: string specifying type of chart.
source: the data source for the chart. Can be actual data (e.g. list) or the name of
a data source (e.g. the name of a query module).
chart_options: a dictionary of options for the chart. Can contain a 'controls' entry
specifying controls. Other entries are passed as JSON to Google Charts.
fields: the fields to chart. Can be '*' for all fields (only sensible if the columns are
ordered; e.g. a Query or list of lists, but not a list of dictionaries); otherwise a
string containing a comma-separated list of field names.
refresh_interval: a time in seconds after which the chart data will be refreshed. 0 if the
chart should not be refreshed (i.e. the data is static).
refresh_data: if the source is a list or other raw data, this is a YAML string containing
metadata needed to support calls to refresh (get_chart_data).
control_defaults: the default variable values for controls that are shared across charts
including this one.
control_ids: the DIV IDs for controls that are shared across charts including this one.
schema: an optional schema for the data; if not supplied one will be inferred.
Returns:
A string containing the HTML for the chart.
"""
div_id = _html.Html.next_id()
controls_html = ''
if control_defaults is None:
control_defaults = {}
if control_ids is None:
control_ids = []
if chart_options is not None and 'variables' in chart_options:
controls = chart_options['variables']
del chart_options['variables'] # Just to make sure GCharts doesn't see them.
try:
item = get_notebook_item(source)
_, variable_defaults = datalab.data.SqlModule.get_sql_statement_with_environment(item, '')
except Exception:
variable_defaults = {}
controls_html, defaults, ids = parse_control_options(controls, variable_defaults)
# We augment what we are passed so that in principle we can have controls that are
# shared by charts as well as controls that are specific to a chart.
control_defaults.update(defaults)
    control_ids.extend(ids)
_HTML_TEMPLATE = """
<div class="bqgc-container">
{controls}
<div class="bqgc {extra_class}" id="{id}">
</div>
</div>
<script>
require.config({{
paths: {{
d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',
plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'
}},
map: {{
'*': {{
datalab: 'nbextensions/gcpdatalab'
}}
}},
shim: {{
plotly: {{
deps: ['d3', 'jquery'],
exports: 'plotly'
}}
}}
}});
require(['datalab/charting',
'datalab/element!{id}',
'base/js/events',
'datalab/style!/nbextensions/gcpdatalab/charting.css'
],
function(charts, dom, events) {{
charts.render(
'{driver}',
dom,
events,
'{chart_type}',
{control_ids},
{data},
{options},
{refresh_data},
{refresh_interval},
{total_rows});
}}
);
</script>
"""
count = 25 if chart_type == 'paged_table' else -1
data, total_count = get_data(source, fields, control_defaults, 0, count, schema)
if refresh_data is None:
if isinstance(source, basestring):
source_index = get_data_source_index(source)
refresh_data = {'source_index': source_index, 'name': source_index}
else:
refresh_data = {'name': 'raw data'}
refresh_data['fields'] = fields
# TODO(gram): check if we need to augment env with user_ns
return _HTML_TEMPLATE \
.format(driver=driver_name,
controls=controls_html,
id=div_id,
chart_type=chart_type,
extra_class=" bqgc-controlled" if len(controls_html) else '',
data=json.dumps(data, cls=datalab.utils.JSONEncoder),
options=json.dumps(chart_options, cls=datalab.utils.JSONEncoder),
refresh_data=json.dumps(refresh_data, cls=datalab.utils.JSONEncoder),
refresh_interval=refresh_interval,
control_ids=str(control_ids),
total_rows=total_count)
def profile_df(df):
""" Generate a profile of data in a dataframe.
Args:
df: the Pandas dataframe.
"""
# The bootstrap CSS messes up the Datalab display so we tweak it to not have an effect.
# TODO(gram): strip it out rather than this kludge.
return IPython.core.display.HTML(
pandas_profiling.ProfileReport(df).html.replace('bootstrap', 'nonexistent'))
| apache-2.0 |
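# A quick illustration of the variable expansion implemented by expand_var /
# replace_vars above ('$$' escapes a literal '$').  The env and config dicts
# are hypothetical, and this assumes the functions are in scope, e.g. imported
# from datalab.utils.commands._utils.
env = {'table': 'mydataset.mytable'}
config = {'source': '$table',
          'title': 'Latest rows',
          'caption': '$$table is rendered literally'}
replace_vars(config, env)
# config is now {'source': 'mydataset.mytable',
#                'title': 'Latest rows',
#                'caption': '$table is rendered literally'}
print(config)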
kashif/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 104 | 3778 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])
# First run
plt.figure(0)
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Second run
plt.figure(1)
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Plot LML landscape
plt.figure(2)
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
vmin, vmax = (-LML).min(), (-LML).max()
vmax = 50
plt.contour(Theta0, Theta1, -LML,
levels=np.logspace(np.log10(vmin), np.log10(vmax), 50),
norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
| bsd-3-clause |
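# After fitting, the estimated noise level can be read directly off the
# optimized kernel.  A short continuation sketch for the second fit above
# (k2 is the WhiteKernel term of the sum kernel, k1 the scaled RBF):
print(gp.kernel_)
print("estimated noise level:", gp.kernel_.k2.noise_level)
print("length scale:", gp.kernel_.k1.k2.length_scale)
print("log-marginal-likelihood:", gp.log_marginal_likelihood(gp.kernel_.theta))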
jeffshek/betterself | apis/betterself/v1/exports/views.py | 1 | 4253 | import io
import pandas as pd
from django.http import HttpResponse
from rest_framework.views import APIView
from analytics.events.utils.dataframe_builders import SupplementEventsDataframeBuilder, \
ProductivityLogEventsDataframeBuilder, SleepActivityDataframeBuilder, UserActivityEventDataframeBuilder
from constants import SLEEP_MINUTES_COLUMN
from events.models import SupplementLog, SleepLog, UserActivityLog, DailyProductivityLog
class UserExportAllData(APIView):
throttle_scope = 'user_export_all_data'
@staticmethod
def _write_to_workbook(writer, dataframe, worksheet_name):
dataframe.to_excel(writer, worksheet_name)
worksheets = writer.sheets
worksheet = worksheets[worksheet_name]
# Setting the column this wide looks good for dates representation
# Freezing at 1, 1 makes being able to scroll not a burden
worksheet.set_column('A:A', 17)
worksheet.freeze_panes(1, 1)
def get(self, request):
user = request.user
bytes_io = io.BytesIO()
writer = pd.ExcelWriter(bytes_io, engine='xlsxwriter', options={'remove_timezone': True})
# supplement events
supplement_events_worksheet_name = 'SupplementEvents'
supplement_events = SupplementLog.objects.filter(user=user)
df_builder = SupplementEventsDataframeBuilder(supplement_events)
supplement_events_df = df_builder.get_flat_daily_dataframe()
self._write_to_workbook(writer, supplement_events_df, supplement_events_worksheet_name)
# sleep events
sleep_activities_worksheet_name = 'SleepActivities'
sleep_activities = SleepLog.objects.filter(user=user)
df_builder = SleepActivityDataframeBuilder(sleep_activities)
sleep_activities_series = df_builder.get_sleep_history_series()
self._write_to_workbook(writer, sleep_activities_series, sleep_activities_worksheet_name)
# user activity events
user_activity_events_sheet_name = 'UserActivityEvents'
user_activity_events = UserActivityLog.objects.filter(user=user)
df_builder = UserActivityEventDataframeBuilder(user_activity_events)
user_activity_events_df = df_builder.get_flat_daily_dataframe()
self._write_to_workbook(writer, user_activity_events_df, user_activity_events_sheet_name)
# productivity logs
productivity_log_sheet_name = 'DailyProductivityLog'
productivity_log = DailyProductivityLog.objects.filter(user=user)
df_builder = ProductivityLogEventsDataframeBuilder(productivity_log)
# odd why this one isn't sorted the right way
productivity_log_df = df_builder.get_flat_daily_dataframe().sort_index(ascending=True)
self._write_to_workbook(writer, productivity_log_df, productivity_log_sheet_name)
all_dataframes = [productivity_log_df, supplement_events_df, user_activity_events_df]
concat_dataframe = pd.concat(all_dataframes, axis=1)
# include sleep which is a series and not a dataframe
cumulative_log_sheet_name = 'Aggregate Log'
concat_dataframe[SLEEP_MINUTES_COLUMN] = sleep_activities_series
self._write_to_workbook(writer, concat_dataframe, cumulative_log_sheet_name)
cumulative_14_day_dataframe_sheet_name = 'Aggregate 14 Log'
cumulative_14_day_dataframe = concat_dataframe.rolling(window=14, min_periods=1).sum()[14:]
self._write_to_workbook(writer, cumulative_14_day_dataframe, cumulative_14_day_dataframe_sheet_name)
cumulative_28_day_dataframe_sheet_name = 'Aggregate 28 Log'
cumulative_28_day_dataframe = concat_dataframe.rolling(window=28, min_periods=1).sum()[28:]
self._write_to_workbook(writer, cumulative_28_day_dataframe, cumulative_28_day_dataframe_sheet_name)
        # make sure all the output gets written to bytes io
writer.close()
# http response because we are providing data and not doing any template / rendering
response = HttpResponse(
bytes_io.getvalue(),
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=user_export_data.xlsx'
return response
| mit |
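# The in-memory Excel pattern used by UserExportAllData, reduced to its
# essentials (standalone sketch, independent of the Django models; the sheet
# and column names are made up for illustration).
import io
import pandas as pd

bytes_io = io.BytesIO()
writer = pd.ExcelWriter(bytes_io, engine='xlsxwriter')
pd.DataFrame({'metric': [1, 2, 3]}).to_excel(writer, 'ExampleSheet')
writer.sheets['ExampleSheet'].freeze_panes(1, 1)  # same tweak as _write_to_workbook
writer.close()                                    # flush the workbook into the buffer
payload = bytes_io.getvalue()                     # bytes ready for an HttpResponse
print(len(payload))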
dshen1/trading-with-python | cookbook/workingWithDatesAndTime.py | 77 | 1551 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 17:45:02 2011
@author: jev
"""
import time
import datetime as dt
from pandas import *
from pandas.core import datetools
# basic functions
print 'Epoch start: %s' % time.asctime(time.gmtime(0))
print 'Seconds from epoch: %.2f' % time.time()
today = dt.date.today()
print type(today)
print 'Today is %s' % today.strftime('%Y.%m.%d')
# parse datetime
d = dt.datetime.strptime('20120803 21:59:59',"%Y%m%d %H:%M:%S")
# time deltas
someDate = dt.date(2011,8,1)
delta = today - someDate
print 'Delta :', delta
# calculate difference in dates
delta = dt.timedelta(days=20)
print 'Today-delta=', today-delta
t = dt.datetime(*time.strptime('3/30/2004',"%m/%d/%Y")[0:5])
# the '*' operator unpacks the tuple, producing the argument list.
print t
# print every 3rd Wednesday of the month
for month in xrange(1,13):
t = dt.date(2013,month,1)+datetools.relativedelta(months=1)
offset = datetools.Week(weekday=4)
    if t.weekday() != 4:
t_new = t+3*offset
else:
t_new = t+2*offset
t_new = t_new-datetools.relativedelta(days=30)
print t_new.strftime("%B, %d %Y (%A)")
#rng = DateRange(t, t+datetools.YearEnd())
#print rng
# create a range of times
start = dt.datetime(2012,8,1)+datetools.relativedelta(hours=9,minutes=30)
end = dt.datetime(2012,8,1)+datetools.relativedelta(hours=22)
rng = date_range(start,end,freq='30min')
for r in rng: print r.strftime("%Y%m%d %H:%M:%S") | bsd-3-clause |
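# The third-Wednesday idea in the loop above can also be expressed with pandas'
# week-of-month frequency alias ('WOM-3WED' = third Wednesday of each month);
# a sketch using the modern API:
import pandas as pd

for t in pd.date_range('2013-01-01', '2013-12-31', freq='WOM-3WED'):
    print(t.strftime("%B, %d %Y (%A)"))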
mne-tools/mne-tools.github.io | dev/_downloads/ceb76325480611dc7a2e973a3b7a782c/20_dipole_fit.py | 5 | 5301 | # -*- coding: utf-8 -*-
"""
============================================================
Source localization with equivalent current dipole (ECD) fit
============================================================
This shows how to fit a dipole :footcite:`Sarvas1987` using mne-python.
For a comparison of fits between MNE-C and mne-python, see
`this gist <https://gist.github.com/larsoner/ca55f791200fe1dc3dd2>`__.
"""
from os import path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.forward import make_forward_dipole
from mne.evoked import combine_evoked
from mne.label import find_pos_in_annot
from mne.simulation import simulate_evoked
from nilearn.plotting import plot_anat
from nilearn.datasets import load_mni152_template
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
fname_surf_lh = op.join(subjects_dir, 'sample', 'surf', 'lh.white')
###############################################################################
# Let's localize the N100m (using MEG only)
evoked = mne.read_evokeds(fname_ave, condition='Right Auditory',
baseline=(None, 0))
evoked.pick_types(meg=True, eeg=False)
evoked_full = evoked.copy()
evoked.crop(0.07, 0.08)
# Fit a dipole
dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]
# Plot the result in 3D brain with the MRI image.
dip.plot_locations(fname_trans, 'sample', subjects_dir, mode='orthoview')
###############################################################################
# Plot the result in 3D brain with the MRI image using Nilearn
# In MRI coordinates and in MNI coordinates (template brain)
trans = mne.read_trans(fname_trans)
subject = 'sample'
mni_pos = mne.head_to_mni(dip.pos, mri_head_t=trans,
subject=subject, subjects_dir=subjects_dir)
mri_pos = mne.head_to_mri(dip.pos, mri_head_t=trans,
subject=subject, subjects_dir=subjects_dir)
# In the meantime let's find an anatomical label for the best fitted dipole
best_dip_id = dip.gof.argmax()
best_dip_mri_pos = mri_pos[best_dip_id]
label = find_pos_in_annot(best_dip_mri_pos, subject=subject,
subjects_dir=subjects_dir,
annot='aparc.a2009s+aseg')
# Draw dipole position on MRI scan and add anatomical label from parcellation
t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
fig_T1 = plot_anat(t1_fname, cut_coords=mri_pos[0],
title=f'Dipole location: {label}')
template = load_mni152_template()
fig_template = plot_anat(template, cut_coords=mni_pos[0],
title='Dipole loc. (MNI Space)')
###############################################################################
# Calculate and visualise magnetic field predicted by dipole with maximum GOF
# and compare to the measured data, highlighting the ipsilateral (right) source
fwd, stc = make_forward_dipole(dip, fname_bem, evoked.info, fname_trans)
pred_evoked = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
# find time point with highest GOF to plot
best_idx = np.argmax(dip.gof)
best_time = dip.times[best_idx]
print('Highest GOF %0.1f%% at t=%0.1f ms with confidence volume %0.1f cm^3'
% (dip.gof[best_idx], best_time * 1000,
dip.conf['vol'][best_idx] * 100 ** 3))
# remember to create a subplot for the colorbar
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=[10., 3.4],
gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1],
top=0.85))
vmin, vmax = -400, 400 # make sure each plot has same colour range
# first plot the topography at the time of the best fitting (single) dipole
plot_params = dict(times=best_time, ch_type='mag', outlines='skirt',
colorbar=False, time_unit='s')
evoked.plot_topomap(time_format='Measured field', axes=axes[0], **plot_params)
# compare this to the predicted field
pred_evoked.plot_topomap(time_format='Predicted field', axes=axes[1],
**plot_params)
# Subtract predicted from measured data (apply equal weights)
diff = combine_evoked([evoked, pred_evoked], weights=[1, -1])
plot_params['colorbar'] = True
diff.plot_topomap(time_format='Difference', axes=axes[2:], **plot_params)
fig.suptitle('Comparison of measured and predicted fields '
'at {:.0f} ms'.format(best_time * 1000.), fontsize=16)
fig.tight_layout()
###############################################################################
# Estimate the time course of a single dipole with fixed position and
# orientation (the one that maximized GOF) over the entire interval
dip_fixed = mne.fit_dipole(evoked_full, fname_cov, fname_bem, fname_trans,
pos=dip.pos[best_idx], ori=dip.ori[best_idx])[0]
dip_fixed.plot(time_unit='s')
##############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
SebDieBln/QGIS | python/plugins/processing/algs/qgis/MeanAndStdDevPlot.py | 19 | 3553 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MeanAndStdDevPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class MeanAndStdDevPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
MEAN_FIELD = 'MEAN_FIELD'
STDDEV_FIELD = 'STDDEV_FIELD'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Mean and standard deviation plot')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterTable(self.INPUT,
self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'), self.INPUT,
ParameterTableField.DATA_TYPE_ANY))
self.addParameter(ParameterTableField(self.MEAN_FIELD,
self.tr('Mean field'), self.INPUT))
self.addParameter(ParameterTableField(self.STDDEV_FIELD,
self.tr('StdDev field'), self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Plot')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
meanfieldname = self.getParameterValue(self.MEAN_FIELD)
stddevfieldname = self.getParameterValue(self.STDDEV_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, meanfieldname, stddevfieldname)
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[meanfieldname], width, color='r',
yerr=values[stddevfieldname],
error_kw=dict(ecolor='yellow'),
)
plt.xticks(ind, values[namefieldname], rotation=45)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<html><img src="' + plotFilename + '"/></html>')
f.close()
| gpl-2.0 |
snnn/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 21 | 54488 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features],
0), array_ops.concat([labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
update_global_step = training_util.get_global_step().assign_add(1)
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return (const, const,
control_flow_ops.group(train_op_1, training_op_2,
update_global_step))
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, update_global_step
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
def _model_fn_ops(expected_features, expected_labels, actual_features,
actual_labels, mode):
assert_ops = tuple([
check_ops.assert_equal(
expected_features[k], actual_features[k], name='assert_%s' % k)
for k in expected_features
] + [
check_ops.assert_equal(
expected_labels, actual_labels, name='assert_labels')
])
with ops.control_dependencies(assert_ops):
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=training_util.get_global_step().assign_add(1))
def _make_input_fn(features, labels):
def _input_fn():
return {k: constant_op.constant(v)
for k, v in six.iteritems(features)}, constant_op.constant(labels)
return _input_fn
class EstimatorModelFnTest(test.TestCase):
def testModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, mode, params, config):
model_fn_call_count[0] += 1
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
est = estimator.Estimator(
model_fn=_model_fn, params=expected_params, config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testPartialModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
expected_foo = 45.
expected_bar = 46.
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, foo, mode, params, config, bar):
model_fn_call_count[0] += 1
self.assertEqual(expected_foo, foo)
self.assertEqual(expected_bar, bar)
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
partial_model_fn = functools.partial(
_model_fn, foo=expected_foo, bar=expected_bar)
est = estimator.Estimator(
model_fn=partial_model_fn,
params=expected_params,
config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features,
labels,
mode,
params,
config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return (constant_op.constant(0.), constant_op.constant(0.),
training_util.get_global_step().assign_add(1))
est = estimator.Estimator(
model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
update_global_step = training_util.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = training_util.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = training_util.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=training_util.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=training_util.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(
dtype=dtypes.string, shape=[None], name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(features, labels, {
'examples': serialized_tf_example
})
est.export_savedmodel(
os.path.join(est.model_dir, 'export'), serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(
model_fn=linear_model_fn, config=config, model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError, 'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(
model_fn=linear_model_fn, config=config, model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(
model_fn=linear_model_fn, model_dir='test_dir', config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir, model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
# TODO(b/78461127): Please modify tests to not directly rely on names of
# checkpoints.
self.assertAllEqual(['model.ckpt-0', 'model.ckpt-5'],
ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1, model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2, model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={
'learning_rate': 0.01
}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling fit
    # and evaluate. This requires the implementation to copy the array
    # internally if any hooks are added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(
input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testSummaryWritingWithSummaryProto(self):
def _streaming_mean_squared_error_histogram(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
metrics, update_ops = metric_ops.streaming_mean_squared_error(
predictions,
labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
return summary.histogram('histogram', metrics), update_ops
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={
'MSE': _streaming_mean_squared_error_histogram
})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('MSE' in output_values)
self.assertTrue(output_values['MSE'].HasField('histo'))
def testSummaryWritingWithTensor(self):
    def _streaming_prediction_mean_tensor(predictions,
                                          weights=None,
                                          metrics_collections=None,
                                          updates_collections=None,
                                          name=None):
return metric_ops.streaming_mean_tensor(
predictions,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={
            'PMT': _streaming_prediction_mean_tensor
})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('PMT' in output_values)
self.assertTrue(output_values['PMT'].HasField('tensor'))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertItemsEqual(['bogus_lookup', 'feature'], [
compat.as_str_any(x)
for x in graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS)
])
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_graph_transforms(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base,
serving_input_fn,
assets_extra=assets_extra,
graph_rewrite_specs=[
estimator.GraphRewriteSpec(['tag_1'], []),
estimator.GraphRewriteSpec(['tag_2', 'tag_3'],
['strip_unused_nodes'])
])
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
# tag_1 is untransformed.
tags = ['tag_1']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# Since there were no transforms, both save ops are still present.
self.assertTrue('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# Since there were no transforms, the hash table lookup is still there.
self.assertTrue('hash_table_Lookup' in graph_ops)
# Restore, to validate that the export was well-formed.
# tag_2, tag_3 was subjected to strip_unused_nodes.
tags = ['tag_2', 'tag_3']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# The Saver used to restore the checkpoint into the export Session
# was not added to the SAVERS collection, so strip_unused_nodes removes
# it. The one explicitly created in export_savedmodel is tracked in
# the MetaGraphDef saver_def field, so that one is retained.
# TODO(soergel): Make Savers sane again. I understand this is all a bit
# nuts but for now the test demonstrates what actually happens.
self.assertFalse('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# The fake hash table lookup wasn't connected to anything; stripped.
self.assertFalse('hash_table_Lookup' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual({
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
}, feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool), None)
)
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
robbymeals/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
| bsd-3-clause |
almarklein/bokeh | bokeh/charts/builder/tests/test_boxplot_builder.py | 1 | 4824 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from collections import OrderedDict
import unittest
import numpy as np
import pandas as pd
from bokeh.charts import BoxPlot
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestBoxPlot(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict([
('bronze', np.array([7.0, 10.0, 8.0, 7.0, 4.0, 4.0, 1.0, 5.0, 2.0, 1.0,
4.0, 2.0, 1.0, 2.0, 4.0, 1.0, 0.0, 1.0, 1.0, 2.0,
0.0, 1.0, 0.0, 0.0, 1.0, 1.0])),
('silver', np.array([8., 4., 6., 4., 8., 3., 3., 2., 5., 6.,
1., 4., 2., 3., 2., 0., 0., 1., 2., 1.,
3., 0., 0., 1., 0., 0.])),
('gold', np.array([6., 6., 6., 8., 4., 8., 6., 3., 2., 2., 2., 1.,
3., 1., 0., 5., 4., 2., 0., 0., 0., 1., 1., 0., 0.,
0.]))
])
xyvaluesdf = pd.DataFrame(xyvalues)
        expected_datarect = {
'colors': ['#f22c40', '#5ab738', '#407ee7', '#df5320', '#00ad9c', '#c33ff3'],
'groups': ['bronze', 'silver', 'gold'],
'iqr_centers': [2.5, 2.5, 2.5],
'iqr_lengths': [3.0, 3.0, 4.5],
'lower_center_boxes': [1.25, 1.5, 1.125],
'lower_height_boxes': [0.5, 1.0, 1.75],
'upper_center_boxes': [2.75, 3.0, 3.375],
'upper_height_boxes': [2.5, 2.0, 2.75],
'width': [0.8, 0.8, 0.8]
}
expected_scatter = {
'colors': ['#f22c40', '#f22c40', '#f22c40', '#f22c40', '#5ab738', '#5ab738'],
'out_x': ['bronze', 'bronze', 'bronze', 'bronze', 'silver', 'silver'],
'out_y': [7.0, 10.0, 8.0, 7.0, 8.0, 8.0]
}
expected_seg = {
'lower': [-3.0, -2.5, -4.75],
'q0': [1.0, 1.0, 0.25],
'q2': [4.0, 4.0, 4.75],
'upper': [6.0, 6.5, 8.75]
}
groups = ['bronze', 'silver', 'gold']
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
bp = create_chart(BoxPlot, _xy, marker='circle', outliers=True)
builder = bp._builders[0]
self.assertEqual(sorted(builder._groups), sorted(groups))
            for key, expected_v in expected_datarect.items():
self.assertEqual(builder._data_rect[key], expected_v)
for key, expected_v in expected_scatter.items():
self.assertEqual(builder._data_scatter[key], expected_v)
for key, expected_v in expected_seg.items():
self.assertEqual(builder._data_segment[key], expected_v)
self.assertEqual(len(builder._legends), 3)
lvalues = [
np.array([7.0, 10.0, 8.0, 7.0, 4.0, 4.0, 1.0, 5.0, 2.0, 1.0,
4.0, 2.0, 1.0, 2.0, 4.0, 1.0, 0.0, 1.0, 1.0, 2.0,
0.0, 1.0, 0.0, 0.0, 1.0, 1.0]),
np.array([8., 4., 6., 4., 8., 3., 3., 2., 5., 6.,
1., 4., 2., 3., 2., 0., 0., 1., 2., 1.,
3., 0., 0., 1., 0., 0.]),
np.array([6., 6., 6., 8., 4., 8., 6., 3., 2., 2., 2., 1.,
3., 1., 0., 5., 4., 2., 0., 0., 0., 1., 1., 0., 0.,
0.])
]
        groups = expected_datarect['groups'] = ['0', '1', '2']
expected_scatter['out_x'] = ['0', '0', '0', '0', '1', '1']
for i, _xy in enumerate([lvalues, np.array(lvalues)]):
bp = create_chart(BoxPlot, _xy, marker='circle', outliers=True)
builder = bp._builders[0]
self.assertEqual(sorted(builder._groups), sorted(groups))
            for key, expected_v in expected_datarect.items():
self.assertEqual(builder._data_rect[key], expected_v)
for key, expected_v in expected_scatter.items():
self.assertEqual(builder._data_scatter[key], expected_v)
for key, expected_v in expected_seg.items():
self.assertEqual(builder._data_segment[key], expected_v)
self.assertEqual(len(builder._legends), 3)
| bsd-3-clause |
stevereyes01/pycbc | examples/distributions/spin_spatial_distr_example.py | 14 | 1973 | import numpy
import matplotlib.pyplot as plt
import pycbc.coordinates as co
from mpl_toolkits.mplot3d import Axes3D
from pycbc import distributions
# We can choose any bounds between 0 and pi for the polar angle of this
# distribution, but the bounds are given in units of pi, so we use 0 to 1.
theta_low = 0.
theta_high = 1.
# Units of pi for the bounds of the azimuthal angle which goes from 0 to 2 pi.
phi_low = 0.
phi_high = 2.
# Create a distribution object from distributions.py
# Here we are using the Uniform Solid Angle function which takes
# theta = polar_bounds(theta_lower_bound to a theta_upper_bound), and then
# phi = azimuthal_bound(phi_lower_bound to a phi_upper_bound).
uniform_solid_angle_distribution = distributions.UniformSolidAngle(
polar_bounds=(theta_low,theta_high),
azimuthal_bounds=(phi_low,phi_high))
# Now we can take a random variable sample from that distribution.
# In this case we want 10000 samples.
solid_angle_samples = uniform_solid_angle_distribution.rvs(size=10000)
# Make a spin magnitude of 1, since the solid angle gives only 2 dimensions and
# we need a 3rd dimension for the 3D plot that we make later on.
spin_mag = numpy.ndarray(shape=(10000), dtype=float)
for i in range(0,10000):
spin_mag[i] = 1.
# Use pycbc.coordinates as co. Use spherical_to_cartesian function to
# convert from spherical polar coordinates to cartesian coordinates.
spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag,
solid_angle_samples['phi'],
solid_angle_samples['theta'])
# Plot the spherical distribution of spins to make sure that they are
# distributed across the surface of a sphere.
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(spinx, spiny, spinz, s=1)
ax.set_xlabel('Spin X Axis')
ax.set_ylabel('Spin Y Axis')
ax.set_zlabel('Spin Z Axis')
plt.show()
| gpl-3.0 |
martinpopel/vowpal_wabbit | python/test_sklearn_vw.py | 1 | 3945 | from collections import namedtuple
import numpy as np
import pytest
from sklearn_vw import VW, VWClassifier, VWRegressor, tovw
from sklearn import datasets
from sklearn.utils.validation import NotFittedError
from scipy.sparse import csr_matrix
"""
Test utilities to support integration of Vowpal Wabbit and scikit-learn
"""
Dataset = namedtuple('Dataset', 'x, y')
@pytest.fixture(scope='module')
def data():
x, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
x = x.astype(np.float32)
return Dataset(x=x, y=y)
class TestVW:
def test_init(self):
assert isinstance(VW(), VW)
def test_fit(self, data):
model = VW(loss_function='logistic')
assert not hasattr(model, 'fit_')
model.fit(data.x, data.y)
assert model.fit_
def test_passes(self, data):
n_passes = 2
model = VW(loss_function='logistic', passes=n_passes)
assert model.passes == n_passes
model.fit(data.x, data.y)
weights = model.get_coefs()
model = VW(loss_function='logistic')
# first pass weights should not be the same
model.fit(data.x, data.y)
assert not np.allclose(weights.data, model.get_coefs().data)
# second pass weights should match
model.fit(data.x, data.y)
assert np.allclose(weights.data, model.get_coefs().data)
def test_predict_not_fit(self, data):
model = VW(loss_function='logistic')
with pytest.raises(NotFittedError):
model.predict(data.x[0], data.y[0])
def test_predict(self, data):
model = VW(loss_function='logistic')
model.fit(data.x, data.y)
assert np.isclose(model.predict(data.x[:1][:1])[0], 0.406929)
def test_predict_no_convert(self):
model = VW(loss_function='logistic')
model.fit(['-1 | bad', '1 | good'], convert_to_vw=False)
assert np.isclose(model.predict(['| good'], convert_to_vw=False)[0], 0.245515)
def test_set_params(self):
model = VW()
assert 'l' not in model.params
model.set_params(l=0.1)
assert model.params['l'] == 0.1
# confirm model params reset with new construction
model = VW()
assert 'l' not in model.params
def test_get_coefs(self, data):
model = VW()
model.fit(data.x, data.y)
weights = model.get_coefs()
        print(weights.data)
assert np.allclose(weights.indices, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 116060])
def test_get_intercept(self, data):
model = VW()
model.fit(data.x, data.y)
intercept = model.get_intercept()
assert isinstance(intercept, float)
class TestVWClassifier:
def test_init(self):
assert isinstance(VWClassifier(), VWClassifier)
def test_decision_function(self, data):
classes = np.array([-1., 1.])
raw_model = VW(loss_function='logistic')
raw_model.fit(data.x, data.y)
predictions = raw_model.predict(data.x)
class_indices = (predictions > 0).astype(np.int)
class_predictions = classes[class_indices]
model = VWClassifier()
model.fit(data.x, data.y)
assert np.allclose(class_predictions, model.predict(data.x))
class TestVWRegressor:
def test_init(self):
assert isinstance(VWRegressor(), VWRegressor)
def test_predict(self, data):
raw_model = VW()
raw_model.fit(data.x, data.y)
model = VWRegressor()
model.fit(data.x, data.y)
assert np.allclose(raw_model.predict(data.x), model.predict(data.x))
def test_tovw():
x = np.array([[1.2, 3.4, 5.6, 1.0, 10], [7.8, 9.10, 11, 0, 20]])
y = np.array([1, -1])
w = [1, 2]
expected = ['1 1 | 0:1.2 1:3.4 2:5.6 3:1 4:10',
'-1 2 | 0:7.8 1:9.1 2:11 4:20']
assert tovw(x=x, y=y, sample_weight=w) == expected
assert tovw(x=csr_matrix(x), y=y, sample_weight=w) == expected
| bsd-3-clause |
DTOcean/dtocean-core | tests/test_data_definitions_strata.py | 1 | 4592 | import pytest
import numpy as np
import matplotlib.pyplot as plt
from aneris.control.factory import InterfaceFactory
from dtocean_core.core import (AutoFileInput,
AutoFileOutput,
AutoPlot,
Core)
from dtocean_core.data import CoreMetaData
from dtocean_core.data.definitions import Strata
def test_Strata_available():
new_core = Core()
all_objs = new_core.control._store._structures
assert "Strata" in all_objs.keys()
def test_Strata():
x = np.linspace(0.,1000.,100)
y = np.linspace(0.,300.,30)
nx = len(x)
ny = len(y)
X, Y = np.meshgrid(x,y)
Z = -X * 0.1 - 1
depths = Z.T[:, :, np.newaxis]
sediments = np.chararray((nx,ny,1), itemsize=20)
sediments[:] = "rock"
raw = {"values": {'depth': depths,
'sediment': sediments},
"coords": [x, y, ["layer 1"]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["UTM x",
"UTM y",
"layer",
"depth",
"sediment"]})
test = Strata()
a = test.get_data(raw, meta)
b = test.get_value(a)
assert b['depth'].values.shape == (100, 30, 1)
assert (b['sediment'].values == "rock").all()
def test_get_None():
test = Strata()
result = test.get_value(None)
assert result is None
@pytest.mark.parametrize("fext", [".nc"])
def test_Strata_auto_file(tmpdir, fext):
test_dir = tmpdir.mkdir("sub")
test_path_out = test_dir.join("test{}".format(fext))
test_path_out_str = str(test_path_out)
test_path_in = test_dir.join("test_depth{}".format(fext))
test_path_in_str = str(test_path_in)
x = np.linspace(0.,1000.,100)
y = np.linspace(0.,300.,30)
nx = len(x)
ny = len(y)
X, Y = np.meshgrid(x,y)
Z = -X * 0.1 - 1
depths = Z.T[:, :, np.newaxis]
sediments = np.chararray((nx,ny,1), itemsize=20)
sediments[:] = "rock"
raw = {"values": {'depth': depths,
'sediment': sediments},
"coords": [x, y, ["layer 1"]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["UTM x",
"UTM y",
"layer",
"depth",
"sediment"]})
test = Strata()
fout_factory = InterfaceFactory(AutoFileOutput)
FOutCls = fout_factory(meta, test)
fout = FOutCls()
fout._path = test_path_out_str
fout.data.result = test.get_data(raw, meta)
fout.connect()
assert len(tmpdir.listdir()) == 1
fin_factory = InterfaceFactory(AutoFileInput)
FInCls = fin_factory(meta, test)
fin = FInCls()
fin.meta.result = meta
fin._path = test_path_in_str
fin.connect()
result = test.get_data(fin.data.result, meta)
assert result['depth'].values.shape == (100, 30, 1)
assert (result['sediment'].values == "rock").all()
def test_Strata_auto_plot(tmpdir):
x = np.linspace(0.,1000.,100)
y = np.linspace(0.,300.,30)
nx = len(x)
ny = len(y)
X, Y = np.meshgrid(x,y)
Z = -X * 0.1 - 1
depths = Z.T[:, :, np.newaxis]
sediments = np.chararray((nx,ny,1), itemsize=20)
sediments[:] = "rock"
raw = {"values": {'depth': depths,
'sediment': sediments},
"coords": [x, y, ["layer 1"]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["UTM x",
"UTM y",
"layer",
"depth",
"sediment"]})
test = Strata()
fout_factory = InterfaceFactory(AutoPlot)
PlotCls = fout_factory(meta, test)
plot = PlotCls()
plot.data.result = test.get_data(raw, meta)
plot.meta.result = meta
plot.connect()
assert len(plt.get_fignums()) == 1
plt.close("all")
| gpl-3.0 |
benvermaercke/pyqtgraph | examples/cx_freeze/setup.py | 26 | 1174 | # Build with `python setup.py build_exe`
from cx_Freeze import setup, Executable
import shutil
from glob import glob
# Remove the build folder
shutil.rmtree("build", ignore_errors=True)
shutil.rmtree("dist", ignore_errors=True)
import sys
includes = ['PyQt4.QtCore', 'PyQt4.QtGui', 'sip', 'pyqtgraph.graphicsItems',
'numpy', 'atexit']
excludes = ['cvxopt','_gtkagg', '_tkagg', 'bsddb', 'curses', 'email', 'pywin.debugger',
'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl','tables',
'Tkconstants', 'Tkinter', 'zmq','PySide','pysideuic','scipy','matplotlib']
if sys.version[0] == '2':
# causes syntax error on py2
excludes.append('PyQt4.uic.port_v3')
base = None
if sys.platform == "win32":
base = "Win32GUI"
build_exe_options = {'excludes': excludes,
'includes':includes, 'include_msvcr':True,
'compressed':True, 'copy_dependent_files':True, 'create_shared_zip':True,
'include_in_shared_zip':True, 'optimize':2}
setup(name = "cx_freeze plot test",
version = "0.1",
description = "cx_freeze plot test",
options = {"build_exe": build_exe_options},
executables = [Executable("plotTest.py", base=base)])
| mit |
dataship/frame | test/data/groupby.where.sum/operation.py | 1 | 1360 | """sum operation
"""
import pandas as pd
import math
def convert_to_dict(r):
# returns a dictionary whose keys are tuples
tupled = r.to_dict()
# convert tuple keys to nested dictionaries
dicted = {}
for (t, k) in tupled.items():
level = dicted
# create a nested dictionary for each item in the tuple
for l in t[:-1]:
if l in level:
level = level[l]
else:
level[l] = {}
level = level[l]
# the last level points to the value
l = t[-1]
level[l] = k.item() # convert numpy type to python type
return dicted
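# Sketch of the intended transformation (hypothetical values): a grouped result
# keyed by tuples such as {('a', 1): 5, ('a', 2): 7, ('b', 1): 3} becomes the
# nested dictionary {'a': {1: 5, 2: 7}, 'b': {1: 3}}.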
SAMPLE = 10
def execute(options, id_columns, value_columns):
'''
id_columns - a dictionary mapping names (strings) to numpy arrays
value_columns - a dictionary mapping names (strings) to numpy arrays
'''
columns = id_columns.copy()
columns.update(value_columns)
frame = pd.DataFrame(columns)
id_name = "id_0"
value_name = "value_0"
# create a subset of the column values
column = id_columns[id_name]
uniques = set(column[:SAMPLE])
l = int(math.ceil(len(uniques)/2.0))
subset = sorted(list(uniques))[:l]
#print(subset)
#frame.loc[frame[id_name] == 1, value_name].sum()
#v = frame.loc[frame[id_name].isin(subset), value_name].sum()
filtered = frame.loc[frame[id_name].isin(subset)]
grouped = filtered.groupby(by=list(id_columns.keys()))
return convert_to_dict(grouped.sum()["value_0"])
| mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/core/window.py | 3 | 68121 | """
provide a generic structure to support window functions,
similar to how we have a Groupby object
"""
from __future__ import division
import warnings
import numpy as np
from collections import defaultdict
from datetime import timedelta
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex)
from pandas.core.dtypes.common import (
is_integer,
is_bool,
is_float_dtype,
is_integer_dtype,
needs_i8_conversion,
is_timedelta64_dtype,
is_list_like,
_ensure_float64,
is_scalar)
import pandas as pd
from pandas.core.base import (PandasObject, SelectionMixin,
GroupByMixin)
import pandas.core.common as com
import pandas._libs.window as _window
from pandas.tseries.offsets import DateOffset
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Substitution, Appender,
cache_readonly)
from pandas.core.generic import _shared_docs
from textwrap import dedent
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
same type as input
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
"""
class _Window(PandasObject, SelectionMixin):
_attributes = ['window', 'min_periods', 'freq', 'center', 'win_type',
'axis', 'on', 'closed']
exclusions = set()
def __init__(self, obj, window=None, min_periods=None, freq=None,
center=False, win_type=None, axis=0, on=None, closed=None,
**kwargs):
if freq is not None:
warnings.warn("The freq kw is deprecated and will be removed in a "
"future version. You can resample prior to passing "
"to a window function", FutureWarning, stacklevel=3)
self.__dict__.update(kwargs)
self.blocks = []
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.freq = freq
self.center = center
self.win_type = win_type
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self):
return None
@property
def _on(self):
return None
@property
def is_freq_type(self):
return self.win_type == 'freq'
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not \
is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in \
['right', 'both', 'left', 'neither']:
raise ValueError("closed must be 'right', 'left', 'both' or "
"'neither'")
def _convert_freq(self, how=None):
""" resample according to the how, return a new object """
obj = self._selected_obj
index = None
if (self.freq is not None and
isinstance(obj, (ABCSeries, ABCDataFrame))):
if how is not None:
warnings.warn("The how kw argument is deprecated and removed "
"in a future version. You can resample prior "
"to passing to a window function", FutureWarning,
stacklevel=6)
obj = obj.resample(self.freq).aggregate(how or 'asfreq')
return obj, index
def _create_blocks(self, how):
""" split data into blocks & return conformed data """
obj, index = self._convert_freq(how)
if index is not None:
index = self._on
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]),
copy=False)
blocks = obj.as_blocks(copy=False).values()
return blocks, obj, index
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self):
return self.__class__.__name__
def __unicode__(self):
""" provide a nice str repr of our rolling object """
attrs = ["{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self._window_type,
attrs=','.join(attrs))
def _get_index(self, index=None):
"""
Return index as ndarrays
Returns
-------
tuple of (index, index_as_ndarray)
"""
if self.is_freq_type:
if index is None:
index = self._on
return index, index.asi8
return index, index
def _prep_values(self, values=None, kill_inf=True, how=None):
if values is None:
values = getattr(self._selected_obj, 'values', self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = _ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError("ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(
action=self._window_type,
dtype=values.dtype))
else:
try:
values = _ensure_float64(values)
except (ValueError, TypeError):
raise TypeError("cannot handle this type -> {0}"
"".format(values.dtype))
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None):
""" wrap a single result """
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
result = pd.to_timedelta(
result.ravel(), unit='ns').values.reshape(result.shape)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj):
"""
wrap the results
        Parameters
        ----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
"""
from pandas import Series
from pandas.core.index import _ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None \
and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = _ensure_index(self._selection)
# need to reorder to include original location of
            # the on column (if it's not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
if not len(final):
return obj.astype('float64')
return pd.concat(final, axis=1).reindex(columns=columns,
copy=False)
def _center_window(self, result, window):
""" center the result in the window """
if self.axis > result.ndim - 1:
raise ValueError("Requested axis is larger then no. of argument "
"dimensions")
from pandas import Series, DataFrame
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (Series, DataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
return self.apply(arg, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs['sum'] = dedent("""
%(name)s sum
Parameters
----------
how : string, default None (DEPRECATED)
Method for down- or re-sampling""")
_shared_docs['mean'] = dedent("""
%(name)s mean
Parameters
----------
how : string, default None (DEPRECATED)
Method for down- or re-sampling""")
class Window(_Window):
"""
    Provides rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
    If it's an offset then this will be the time period of each window. Each
    window will be of variable size based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
this will default to 1.
freq : string or DateOffset object, optional (default None) (DEPRECATED)
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. See the notes below.
on : string, optional
For a DataFrame, column on which to calculate
the rolling window, rather than the index
closed : string, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
axis : int or string, default 0
Returns
-------
a Window or Rolling sub-classed for the particular operation
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 1.0
2 2.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
    Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
....: index = [pd.Timestamp('20130101 09:00:00'),
....: pd.Timestamp('20130101 09:00:02'),
....: pd.Timestamp('20130101 09:00:03'),
....: pd.Timestamp('20130101 09:00:05'),
....: pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
"""
def validate(self):
super(Window, self).validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window < 0:
raise ValueError("window must be non-negative")
try:
import scipy.signal as sig
except ImportError:
raise ImportError('Please install scipy to generate window '
'weight')
if not isinstance(self.win_type, compat.string_types):
raise ValueError('Invalid win_type {0}'.format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError('Invalid win_type {0}'.format(self.win_type))
else:
raise ValueError('Invalid window {0}'.format(window))
def _prep_window(self, **kwargs):
"""
provide validation for our window type, return the window
we have already been validated
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com._asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] + _pop_args(win_type,
arg_map[win_type],
kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, how=None, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : boolean, default True
If True computes weighted mean, else weighted sum
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj, index = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return _window.roll_window(np.concatenate((arg,
additional_nans))
if center else arg, window, minp,
avg=mean)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
See also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
""" provide the groupby facilities """
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop('parent', None) # noqa
groupby = kwargs.pop('groupby', None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super(GroupByMixin, self).__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch('count')
corr = GroupByMixin._dispatch('corr', other=None, pairwise=None)
cov = GroupByMixin._dispatch('cov', other=None, pairwise=None)
def _apply(self, func, name, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, compat.string_types):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(self, func, name=None, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
name : string, optional
name of this function
window : int/array, default to _get_window()
center : boolean, default to self.center
check_minp : function, default to _use_window
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj, index = self._create_blocks(how=how)
index, indexi = self._get_index(index=index)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = _ensure_float64(arg)
return cfunc(arg,
window, minp, indexi, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(np.concatenate((x, additional_nans)),
window, min_periods=self.min_periods,
closed=self.closed)
else:
def calc(x):
return func(x, window, min_periods=self.min_periods,
closed=self.closed)
with np.errstate(all='ignore'):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
class _Rolling_and_Expanding(_Rolling):
_shared_docs['count'] = """%(name)s count of number of non-NaN
observations inside provided window."""
def count(self):
blocks, obj, index = self._create_blocks(how=None)
index, indexi = self._get_index(index=index)
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notnull().astype(int)
result = self._constructor(result, window=window, min_periods=0,
center=self.center,
closed=self.closed).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs['apply'] = dedent("""
%(name)s function apply
Parameters
----------
func : function
Must produce a single value from an ndarray input
\*args and \*\*kwargs are passed to the function""")
def apply(self, func, args=(), kwargs={}):
# TODO: _level is unused?
_level = kwargs.pop('_level', None) # noqa
window = self._get_window()
offset = _offset(window, self.center)
index, indexi = self._get_index()
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
return _window.roll_generic(arg, window, minp, indexi, closed,
offset, func, args, kwargs)
return self._apply(f, func, args=args, kwargs=kwargs,
center=False)
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply('roll_sum', 'sum', **kwargs)
_shared_docs['max'] = dedent("""
%(name)s maximum
Parameters
----------
how : string, default 'max' (DEPRECATED)
Method for down- or re-sampling""")
def max(self, how=None, *args, **kwargs):
nv.validate_window_func('max', args, kwargs)
if self.freq is not None and how is None:
how = 'max'
return self._apply('roll_max', 'max', how=how, **kwargs)
_shared_docs['min'] = dedent("""
%(name)s minimum
Parameters
----------
how : string, default 'min' (DEPRECATED)
Method for down- or re-sampling""")
def min(self, how=None, *args, **kwargs):
nv.validate_window_func('min', args, kwargs)
if self.freq is not None and how is None:
how = 'min'
return self._apply('roll_min', 'min', how=how, **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply('roll_mean', 'mean', **kwargs)
_shared_docs['median'] = dedent("""
%(name)s median
Parameters
----------
how : string, default 'median' (DEPRECATED)
Method for down- or re-sampling""")
def median(self, how=None, **kwargs):
if self.freq is not None and how is None:
how = 'median'
return self._apply('roll_median_c', 'median', how=how, **kwargs)
_shared_docs['std'] = dedent("""
%(name)s standard deviation
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func('std', args, kwargs)
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(_window.roll_var(arg, window, minp, indexi,
self.closed, ddof))
return self._apply(f, 'std', check_minp=_require_min_periods(1),
ddof=ddof, **kwargs)
_shared_docs['var'] = dedent("""
%(name)s variance
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func('var', args, kwargs)
return self._apply('roll_var', 'var',
check_minp=_require_min_periods(1), ddof=ddof,
**kwargs)
_shared_docs['skew'] = """Unbiased %(name)s skewness"""
def skew(self, **kwargs):
return self._apply('roll_skew', 'skew',
check_minp=_require_min_periods(3), **kwargs)
_shared_docs['kurt'] = """Unbiased %(name)s kurtosis"""
def kurt(self, **kwargs):
return self._apply('roll_kurt', 'kurt',
check_minp=_require_min_periods(4), **kwargs)
_shared_docs['quantile'] = dedent("""
%(name)s quantile
Parameters
----------
quantile : float
0 <= quantile <= 1""")
def quantile(self, quantile, **kwargs):
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
return _window.roll_quantile(arg, window, minp, indexi,
self.closed, quantile)
return self._apply(f, 'quantile', quantile=quantile,
**kwargs)
_shared_docs['cov'] = dedent("""
%(name)s sample covariance
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used
and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
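    # Illustrative behaviour, assuming a DataFrame ``df`` with columns A and B:
    # ``df.rolling(3).cov(pairwise=True)`` returns a MultiIndexed DataFrame with
    # one covariance block (A/B x A/B) per input row, while passing another
    # DataFrame with ``pairwise=False`` computes covariances only for matching
    # column names and returns a plain DataFrame.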
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype('float64')
Y = Y.astype('float64')
mean = lambda x: x.rolling(window, self.min_periods,
center=self.center).mean(**kwargs)
count = (X + Y).rolling(window=window,
center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
_shared_docs['corr'] = dedent("""
%(name)s sample correlation
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations
will be used.""")
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
b = b.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(self._on,
(ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex))
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif (isinstance(self.obj, ABCDataFrame) and
self.on in self.obj.columns):
return pd.Index(self.obj[self.on])
else:
raise ValueError("invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on))
def validate(self):
super(Rolling, self).validate()
# we allow rolling on a datetimelike index
if (self.is_datetimelike and
isinstance(self.window, (compat.string_types, DateOffset,
timedelta))):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError("center is not implemented "
"for datetimelike and offset "
"based windows")
# this will raise ValueError on non-fixed freqs
self.window = freq.nanos
self.win_type = 'freq'
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError("closed only implemented for datetimelike "
"and offset based windows")
def _validate_monotonic(self):
""" validate on is monotonic """
if not self._on.is_monotonic:
formatted = self.on or 'index'
raise ValueError("{0} must be "
"monotonic".format(formatted))
def _validate_freq(self):
""" validate & return our freq """
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError("passed window {0} in not "
"compat with a datetimelike "
"index".format(self.window))
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
See also
--------
pandas.Series.rolling
pandas.DataFrame.rolling
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Rolling, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply('roll_count', 'count')
return super(Rolling, self).count()
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Rolling, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_rolling_func('sum', args, kwargs)
return super(Rolling, self).sum(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_rolling_func('max', args, kwargs)
return super(Rolling, self).max(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_rolling_func('min', args, kwargs)
return super(Rolling, self).min(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_rolling_func('mean', args, kwargs)
return super(Rolling, self).mean(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Rolling, self).median(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('std', args, kwargs)
return super(Rolling, self).std(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('var', args, kwargs)
return super(Rolling, self).var(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Rolling, self).skew(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Rolling, self).kurt(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Rolling, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Rolling, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Rolling, self).corr(other=other, pairwise=pairwise,
**kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provides a rolling groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level
"""
pass
class Expanding(_Rolling_and_Expanding):
"""
Provides expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None) (DEPRECATED)
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
axis : int or string, default 0
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
>>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
_attributes = ['min_periods', 'freq', 'center', 'axis']
def __init__(self, obj, min_periods=1, freq=None, center=False, axis=0,
**kwargs):
super(Expanding, self).__init__(obj=obj, min_periods=min_periods,
freq=freq, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
obj = self._selected_obj
if other is None:
return (max(len(obj), self.min_periods) if self.min_periods
else len(obj))
return (max((len(obj) + len(obj)), self.min_periods)
if self.min_periods else (len(obj) + len(obj)))
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
See also
--------
pandas.DataFrame.expanding.aggregate
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Expanding, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self, **kwargs):
return super(Expanding, self).count(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Expanding, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_expanding_func('sum', args, kwargs)
return super(Expanding, self).sum(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_expanding_func('max', args, kwargs)
return super(Expanding, self).max(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_expanding_func('min', args, kwargs)
return super(Expanding, self).min(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_expanding_func('mean', args, kwargs)
return super(Expanding, self).mean(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Expanding, self).median(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('std', args, kwargs)
return super(Expanding, self).std(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('var', args, kwargs)
return super(Expanding, self).var(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Expanding, self).skew(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Expanding, self).kurt(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Expanding, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Expanding, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Expanding, self).corr(other=other, pairwise=pairwise,
**kwargs)
class ExpandingGroupby(_GroupByMixin, Expanding):
"""
    Provides an expanding groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Expanding
_bias_template = """
Parameters
----------
bias : boolean, default False
Use a standard estimation bias correction
"""
_pairwise_template = """
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations will
be used.
bias : boolean, default False
Use a standard estimation bias correction
"""
class EWM(_Rolling):
r"""
    Provides exponentially weighted functions
.. versionadded:: 0.18.0
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`
.. versionadded:: 0.18.0
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None (DEPRECATED)
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average)
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
>>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
When adjust is True (default), weighted averages are calculated using
weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of x and y used in
calculating the final weighted average of [x, None, y] are 1-alpha and 1
(if adjust is True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
"""
_attributes = ['com', 'min_periods', 'freq', 'adjust', 'ignore_na', 'axis']
def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False,
axis=0):
self.obj = obj
self.com = _get_center_of_mass(com, span, halflife, alpha)
self.min_periods = min_periods
self.freq = freq
self.adjust = adjust
self.ignore_na = ignore_na
self.axis = axis
self.on = None
@property
def _constructor(self):
return EWM
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
See also
--------
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(EWM, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _apply(self, func, how=None, **kwargs):
"""Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input argument
"""
blocks, obj, index = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg):
return cfunc(arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods))
results.append(np.apply_along_axis(func, self.axis, values))
return self._wrap_results(results, blocks, obj)
@Substitution(name='ewm')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""exponential weighted moving average"""
nv.validate_window_func('mean', args, kwargs)
return self._apply('ewma', **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def std(self, bias=False, *args, **kwargs):
"""exponential weighted moving stddev"""
nv.validate_window_func('std', args, kwargs)
return _zsqrt(self.var(bias=bias, **kwargs))
vol = std
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def var(self, bias=False, *args, **kwargs):
"""exponential weighted moving variance"""
nv.validate_window_func('var', args, kwargs)
def f(arg):
return _window.ewmcov(arg, arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods),
int(bias))
return self._apply(f, **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def cov(self, other=None, pairwise=None, bias=False, **kwargs):
"""exponential weighted sample covariance"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_cov(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
cov = _window.ewmcov(X._prep_values(), Y._prep_values(), self.com,
int(self.adjust), int(self.ignore_na),
int(self.min_periods), int(bias))
return X._wrap_result(cov)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def corr(self, other=None, pairwise=None, **kwargs):
"""exponential weighted sample correlation"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_corr(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
def _cov(x, y):
return _window.ewmcov(x, y, self.com, int(self.adjust),
int(self.ignore_na),
int(self.min_periods),
1)
x_values = X._prep_values()
y_values = Y._prep_values()
with np.errstate(all='ignore'):
cov = _cov(x_values, y_values)
x_var = _cov(x_values, x_values)
y_var = _cov(y_values, y_values)
corr = cov / _zsqrt(x_var * y_var)
return X._wrap_result(corr)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
# Helper Funcs
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
from pandas import Series, DataFrame
if not (isinstance(arg1, (np.ndarray, Series, DataFrame)) and
isinstance(arg2, (np.ndarray, Series, DataFrame))):
raise TypeError("arguments to moment function must be of type "
"np.ndarray/Series/DataFrame")
if (isinstance(arg1, (np.ndarray, Series)) and
isinstance(arg2, (np.ndarray, Series))):
X, Y = _prep_binary(arg1, arg2)
return f(X, Y)
elif isinstance(arg1, DataFrame):
def dataframe_from_int_dict(data, frame_template):
result = DataFrame(data, index=frame_template.index)
if len(result.columns) > 0:
result.columns = frame_template.columns[result.columns]
return result
results = {}
if isinstance(arg2, DataFrame):
if pairwise is False:
if arg1 is arg2:
# special case in order to handle duplicate column names
for i, col in enumerate(arg1.columns):
results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
return dataframe_from_int_dict(results, arg1)
else:
if not arg1.columns.is_unique:
raise ValueError("'arg1' columns are not unique")
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
with warnings.catch_warnings(record=True):
X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X
with warnings.catch_warnings(record=True):
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
return DataFrame(results, index=X.index,
columns=res_columns)
elif pairwise is True:
results = defaultdict(dict)
for i, k1 in enumerate(arg1.columns):
for j, k2 in enumerate(arg2.columns):
if j < i and arg2 is arg1:
# Symmetric case
results[i][j] = results[j][i]
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
arg2.iloc[:, j]))
# TODO: not the most efficient (perf-wise)
# though not bad code-wise
from pandas import Panel, MultiIndex, Index
with warnings.catch_warnings(record=True):
p = Panel.from_dict(results).swapaxes('items', 'major')
if len(p.major_axis) > 0:
p.major_axis = arg1.columns[p.major_axis]
if len(p.minor_axis) > 0:
p.minor_axis = arg2.columns[p.minor_axis]
if len(p.items):
result = pd.concat(
[p.iloc[i].T for i in range(len(p.items))],
keys=p.items)
else:
result = DataFrame(
index=MultiIndex(levels=[arg1.index, arg1.columns],
labels=[[], []]),
columns=arg2.columns,
dtype='float64')
# reset our index names to arg1 names
# reset our column names to arg2 names
# careful not to mutate the original names
result.columns = Index(result.columns).set_names(
arg2.columns.name)
result.index = result.index.set_names(
[arg1.index.name, arg1.columns.name])
return result
else:
raise ValueError("'pairwise' is not True/False")
else:
results = {}
for i, col in enumerate(arg1.columns):
results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
return dataframe_from_int_dict(results, arg1)
else:
return _flex_binary_moment(arg2, arg1, f)
def _get_center_of_mass(com, span, halflife, alpha):
valid_count = len([x for x in [com, span, halflife, alpha]
if x is not None])
if valid_count > 1:
raise ValueError("com, span, halflife, and alpha "
"are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if com is not None:
if com < 0:
raise ValueError("com must satisfy: com >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
com = (span - 1) / 2.
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
com = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
com = (1.0 - alpha) / alpha
else:
raise ValueError("Must pass one of com, span, halflife, or alpha")
return float(com)
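# Worked examples for the conversions above (illustrative comment, not part of
# the original source): span=10 gives com = (10 - 1)/2 = 4.5; alpha=0.5 gives
# com = (1 - 0.5)/0.5 = 1.0; halflife=1 gives decay = 1 - exp(log(0.5)/1) = 0.5
# and hence com = 1/0.5 - 1 = 1.0.  All three parameterisations collapse to the
# single center-of-mass value consumed by the EWM kernels.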
def _offset(window, center):
if not is_integer(window):
window = len(window)
offset = (window - 1) / 2. if center else 0
try:
return int(offset)
except:
return offset.astype(int)
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _zsqrt(x):
with np.errstate(all='ignore'):
result = np.sqrt(x)
mask = x < 0
from pandas import DataFrame
if isinstance(x, DataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
# Top-level exports
def rolling(obj, win_type=None, **kwds):
from pandas import Series, DataFrame
if not isinstance(obj, (Series, DataFrame)):
raise TypeError('invalid type: %s' % type(obj))
if win_type is not None:
return Window(obj, win_type=win_type, **kwds)
return Rolling(obj, **kwds)
rolling.__doc__ = Window.__doc__
def expanding(obj, **kwds):
from pandas import Series, DataFrame
if not isinstance(obj, (Series, DataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return Expanding(obj, **kwds)
expanding.__doc__ = Expanding.__doc__
def ewm(obj, **kwds):
from pandas import Series, DataFrame
if not isinstance(obj, (Series, DataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return EWM(obj, **kwds)
ewm.__doc__ = EWM.__doc__
| agpl-3.0 |
datacommonsorg/data | scripts/us_fed/treasury_constant_maturity_rates/generate_csv_and_mcf.py | 1 | 5150 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
1. Extracts the data portion out of the constant maturity rate csv file
downloaded from the Federal Reserve and stores it in
"treasury_constant_maturity_rates.csv".
The output table has the same number of columns as the number of constant
maturities provided and an extra column for dates.
"date" column is of the form "YYYY-MM-DD".
The other interest rate columns are numeric.
2. Generates the StatisticalVariable instance and template MCFs.
Run "python3 generate_csv_and_mcf.py --help" for usage.
'''
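# Rough sketch of the generated csv layout (illustrative comment, not part of
# the original script; the numbers below are placeholders, actual values come
# from the Federal Reserve download):
#
#   date,1-Month,3-Month,...,30-Year
#   2001-07-31,3.67,3.66,...,5.55
#
# i.e. a "date" column followed by one column per entry of MATURITIES.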
from absl import app
from absl import flags
import pandas as pd
from frozendict import frozendict
FLAGS = flags.FLAGS
flags.DEFINE_boolean("csv", True, "Whether or not to generate the csv.")
flags.DEFINE_boolean(
"mcf", False,
"Whether or not to generate the template and StatisticalVariable"
"instance MCFs.")
flags.DEFINE_string("path", "FRB_H15.csv",
"Path to the raw csv containing rates at all maturities.")
# Maturities for which interest rates are provided in the Federal Reserve H.15 release.
# Treasury bills have maturities of a year or less, notes greater than 1 year up
# to 10 years, and bonds greater than 10 years.
MATURITIES = frozendict({
"1-month": "Bill",
"3-month": "Bill",
"6-month": "Bill",
"1-year": "Bill",
"2-year": "Note",
"3-year": "Note",
"5-year": "Note",
"7-year": "Note",
"10-year": "Note",
"20-year": "Bond",
"30-year": "Bond"
})
# URL of the raw csv
CSV_URL = "https://www.federalreserve.gov/datadownload/Output.aspx?rel=H15&"\
"series=bf17364827e38702b42a58cf8eaa3f78&lastobs=&from=&to="\
"&filetype=csv&label=include&layout=seriescolumn&type=package"
def generate_csv():
'''Generates the csv containing the data portion of the constant
maturity rate csv file downloaded from Federal Reserve'''
out_df = pd.DataFrame()
header_rows = 5
name_template = "Market yield on U.S. Treasury securities at {} constant"\
" maturity, quoted on investment basis"
in_df = pd.read_csv(CSV_URL, na_values="ND")
out_df["date"] = in_df["Series Description"][header_rows:]
for maturity in MATURITIES:
column_name = name_template.format(maturity)
out_df[maturity.title()] = in_df[column_name][header_rows:]
out_df.to_csv("treasury_constant_maturity_rates.csv", index=False)
def generate_mcf():
'''Generates the template and StatisticalVariable instance MCFs'''
variable_template = (
'Node: dcid:InterestRate_Treasury{security_type}_{maturity_no_hyphen}\n'
'name: "InterestRate_Treasury{security_type}_{maturity_no_hyphen}"\n'
'typeOf: dcs:StatisticalVariable\n'
'measuredProperty: dcs:interestRate\n'
'populationType: dcs:Treasury{security_type}\n'
'maturity: [{maturity_space}]\n'
'statType: dcs:measuredValue\n')
template_template = (
'Node: E:{filename}->E{index}\n'
'typeOf: dcs:StatVarObservation\n'
'variableMeasured: '
'dcs:InterestRate_Treasury{security_type}_{maturity_no_hyphen}\n'
'measurementMethod: dcs:ConstantMaturityRate\n'
'unit: dcs:Percent\n'
'observationAbout: dcid:country/USA\n'
'observationDate: C:{filename}->date\n'
'value: C:{filename}->{maturity_hyphen}\n')
with open("treasury_constant_maturity_rates.mcf", "w") as mcf_f, \
open("treasury_constant_maturity_rates.tmcf", "w") as tmcf_f:
index = 1
for maturity, security_type in MATURITIES.items():
maturity_hyphen = maturity.title()
maturity_no_hyphen = maturity_hyphen.replace("-", "")
maturity_space = maturity_hyphen.replace("-", " ")
maturity_underscore = maturity_hyphen.replace("-", "_")
format_dict = {
"filename": "treasury_constant_maturity_rates",
"index": index,
"maturity_underscore": maturity_underscore,
"maturity_hyphen": maturity_hyphen,
"security_type": security_type,
"maturity_no_hyphen": maturity_no_hyphen,
"maturity_space": maturity_space
}
mcf_f.write(variable_template.format_map(format_dict))
mcf_f.write("\n")
tmcf_f.write(template_template.format_map(format_dict))
tmcf_f.write("\n")
index += 1
def main(_):
"""Runs the code."""
if FLAGS.csv:
generate_csv()
if FLAGS.mcf:
generate_mcf()
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
JannickWeisshaupt/OpenDFT | src/build_freeze3.py | 1 | 3018 | import sys
import os
from os import listdir
from os.path import isfile, join
from cx_Freeze import setup, Executable
import cx_Freeze.hooks
def hack(finder, module):
return
cx_Freeze.hooks.load_matplotlib = hack
import scipy
import matplotlib
from encodings import ascii
from encodings import idna
from encodings import unicode_escape
import ruamel.yaml
scipy_path = os.path.dirname(scipy.__file__) #use this if you are also using scipy in your application
build_exe_options = {"packages": ["pyface.ui.qt4", "tvtk.vtk_module", "tvtk.pyface.ui.wx", "matplotlib.backends.backend_qt4",'pkg_resources._vendor','pkg_resources.extern','pygments.lexers',
'tvtk.pyface.ui.qt4','pyface.qt','pyface.qt.QtGui','pyface.qt.QtCore','numpy','matplotlib','mayavi'],
"include_files": [(str(scipy_path), "scipy"),'./examples/', #for scipy
(matplotlib.get_data_path(), "mpl-data"),'/home/jannick/python_programs/OpenDFT/src/data','/home/jannick/.local/lib/python3.4/site-packages/pymatgen/core/bond_lengths.json',
'/home/jannick/.local/lib/python3.4/site-packages/pymatgen/core/func_groups.json','/home/jannick/.local/lib/python3.4/site-packages/pymatgen/core/libxc_docs.json',
'/home/jannick/.local/lib/python3.4/site-packages/pymatgen/core/periodic_table.json','/home/jannick/.local/lib/python3.4/site-packages/pymatgen/core/reconstructions_archive.json',
'/usr/local/lib/python3.4/site-packages/PyQt4','/home/jannick/.local/lib/python3.4/site-packages/mayavi','/home/jannick/.local/lib/python3.4/site-packages/ruamel',
'/home/jannick/.local/lib/python3.4/site-packages/pyface','/home/jannick/.local/lib/python3.4/site-packages/tvtk',
'/home/jannick/.local/lib/python3.4/site-packages/pymatgen/core/bond_lengths.json','/home/jannick/.local/lib/python3.4/site-packages/pymatgen/core/func_groups.json',
'/home/jannick/.local/lib/python3.4/site-packages/pymatgen/core/libxc_docs.json','/home/jannick/.local/lib/python3.4/site-packages/pymatgen/core/periodic_table.json',
'/home/jannick/.local/lib/python3.4/site-packages/pymatgen/core/reconstructions_archive.json'
],
"includes":['PyQt4.QtCore','PyQt4.QtGui','pymatgen','pymatgen.symmetry.bandstructure','mayavi','PyQt4'],
'excludes':'Tkinter',
"namespace_packages": ['mayavi','mpl_toolkits'],
'build_exe': '../build/OpenDFT'
}
executables = [
Executable('main.py', targetName="OpenDFT",icon="./data/icons/icon.ico")
]
setup(name='OpenDFT',
version='1.0',
description='OpenDFT',
options = {"build_exe": build_exe_options},
executables=executables,
) | gpl-3.0 |
calben/retino | scripts/test_overview_figs.py | 1 | 9646 | import matplotlib.gridspec as gridspec
import os
import time
import seaborn as sns
from multiprocessing import *
from matplotlib.collections import EllipseCollection
from retino.plot import *
from retino.utils import *
sns.set_style("ticks")
def plot_axon_growth_algorithm(origin, end, target):
t = time.time()
a_origin = origin
a_end = end
origin = np.array(a_end)
desired_direction_weight = 1.1
momentum_direction_weight = 1
desired_direction = get_unit_direction_vector(origin, target)
momentum_direction = get_unit_direction_vector(a_origin, a_end)
desired_and_momentum = desired_direction_weight * desired_direction + momentum_direction_weight * momentum_direction
desired_and_momentum = get_unit_vector(desired_and_momentum)
prenoise_pol = cart_to_pol(desired_and_momentum)[1]
results = []
for i in range(50):
r = np.random.normal(3.0, 1.0, size=1)[0]
noise = np.random.normal(0, .4, size=1)[0]
theta = prenoise_pol + noise
cart_result = pol_to_cart(np.asarray([r, theta]))
results.append(cart_result)
desired_direction = desired_direction * 3
momentum_direction = momentum_direction * 3
desired_and_momentum = desired_and_momentum * 3
fig, ax = plt.subplots(figsize=(5, 5))
ax.plot([a_origin[0], a_end[0]], [a_origin[1], a_end[1]], color="y", linewidth=3.0, label="Segment of Origin")
ax.plot([0, desired_direction[0]], [0, desired_direction[1]], color="g", linewidth=3.0, label="Desired Direction")
ax.plot([0, momentum_direction[0]], [0, momentum_direction[1]], color="r", linewidth=3.0,
label="Momentum Direction")
ax.plot([0, desired_and_momentum[0]], [0, desired_and_momentum[1]], color="b", linewidth=3.0,
label="Weighted Guide Direction")
ax.set_aspect(1)
ax.set_xlim([-4, 4])
ax.set_ylim([-4, 4])
legend = ax.legend(loc='best', shadow=True, fancybox=True)
sns.despine(offset=5)
plt.savefig(generate_tex_friendly_filename(
"paperfigs/GrowthAlgorithmGuideDirection-Direction=" + str(desired_and_momentum)) + ".pdf")
plt.close(fig)
fig, ax = plt.subplots(figsize=(5, 5))
ax.plot([a_origin[0], a_end[0]], [a_origin[1], a_end[1]], color="y", linewidth=3.0, label="Segment of Origin")
ax.plot([0, desired_and_momentum[0]], [0, desired_and_momentum[1]], color="b", linewidth=3.0,
label="Weighted Guide Direction")
for i in range(50):
ax.plot([0, results[i][0]], [0, results[i][1]], color="r", alpha=.3, linewidth=1.0)
ax.set_aspect(1)
ax.set_xlim([-4, 4])
ax.set_ylim([-4, 4])
plt.tight_layout()
plt.savefig(generate_tex_friendly_filename(
"paperfigs/GrowthAlgorithmNewSegmentFamily-Direction=" + str(desired_and_momentum)) + ".pdf")
plt.close(fig)
print(
"Finished GrowthAlgorithmGuideDirection-Direction=" + str(desired_and_momentum) + " in " + str(
time.time() - t))
def plot_demo_for_synapse_growth(origin, end, pool_size, jitter, points_count):
t = time.time()
direction = cart_to_pol(end - origin)
pool = generate_points_along_line(origin, direction, pool_size)
jittered_pool = add_jitter_to_points(pool, jitter)
origins = choose_points_subset(jittered_pool, points_count)
fig, ax = plt.subplots(figsize=(5, 5))
ax.scatter([p[0] for p in pool], [p[1] for p in pool], label="1. Pool of Points Along Axon", color="r", s=30)
ax.scatter([p[0] for p in jittered_pool], [p[1] for p in jittered_pool], label="2. Pool with 0.1 Jitter", color="c",
s=30)
ax.scatter([p[0] for p in origins], [p[1] for p in origins], label="3. Chosen Origins for Post Synaptic Attempts",
color="0", s=40)
legend = ax.legend(loc='upper left', shadow=True)
ax.set_aspect('equal', 'datalim')
plt.tight_layout()
plt.savefig(generate_tex_friendly_filename(
"paperfigs/SynapseGrowth-Pool=" + str(pool_size) + "-Jitter=" + str(jitter) + "-Points=" + str(
points_count)) + ".pdf")
plt.close(fig)
print("Finished SynapseGrowth-Pool=" + str(pool_size) + "-Jitter=" + str(jitter) + "-Points=" + str(
points_count) + " in " + str(time.time() - t))
def produce_demo_for_synapses_to_postsynapses():
    t = time.time()  # timer referenced by the status print at the end of this function
    origin = np.asarray([5.0, 5.0])
end = np.asarray([13.0, 14.0])
direction = cart_to_pol(end - origin)
origins = generate_random_points_along_line(origin, direction, 100, 20)
number_of_circles = 250
circle_origins = [produce_bounded_random_point(15.0, 15.0) for i in range(number_of_circles)]
circle_radiuses = [np.random.normal(1.5, 0.5, size=1)[0] for i in range(number_of_circles)]
fig, ax = plt.subplots(1, figsize=(3, 3))
ax.plot([origin[0], end[0]], [origin[1], end[1]], color="y", linewidth=1.0, label="Axon Segment")
ax.scatter([p[0] for p in origins], [p[1] for p in origins], label="Chosen Origins for Post Synaptic Attempts",
color="b", s=15, zorder=5)
ax.add_collection(
EllipseCollection(widths=[2 * x for x in circle_radiuses], heights=[2 * x for x in circle_radiuses],
angles=0,
units='xy',
facecolors='r',
offsets=circle_origins, transOffset=ax.transData, alpha=0.1,
label="Pool of Postsynapses"))
chosen_circle_origins = []
chosen_circle_radiuses = []
for origin in origins:
i = choose_random_circle_as_connection_index(origin, circle_origins, circle_radiuses)
chosen_circle_origins.append(circle_origins[i][:])
chosen_circle_radiuses.append(circle_radiuses[i])
ax.add_collection(EllipseCollection(widths=[x * 2 for x in chosen_circle_radiuses],
heights=[x * 2 for x in chosen_circle_radiuses], angles=0, units='xy',
facecolors='g',
offsets=chosen_circle_origins, transOffset=ax.transData, alpha=0.2,
label="Connected Postsynapses"))
ax.set_title("PostSynapse Choosing For Growth")
ax.set_aspect('equal', 'datalim')
legend = ax.legend(loc='upper left', shadow=True)
plt.tight_layout()
plt.savefig(generate_tex_friendly_filename("paperfigs/SynapseChoosingForGrowth") + ".pdf")
plt.close(fig)
print("Finished SynapseChoosingForGrowth in " + str(time.time() - t))
def plot_demo_for_colouring_circles_by_gradients():
t = time.time()
number_of_circles = 2500
origins = [produce_bounded_random_point(100.0, 100.0) for i in range(number_of_circles)]
colors = [convert_ndpoint_to_gradients(p, [100.0, 100.0]) for p in origins]
for axis in ["X", "Y"]:
fig, ax = plt.subplots(1, figsize=(3, 3))
if axis == "X":
ax.scatter([p[0] for p in origins], [p[1] for p in origins],
color=[c[0] for c in colors], s=15, zorder=5)
if axis == "Y":
ax.scatter([p[0] for p in origins], [p[1] for p in origins],
color=[c[1] for c in colors], s=15, zorder=5)
ax.set_title("Colored Points by Gradient Along " + axis)
ax.set_xlim([0, 100.0])
ax.set_ylim([0, 100.0])
ax.set_aspect(1)
sns.set_style("ticks")
sns.despine(offset=5)
file_name = generate_tex_friendly_filename(
"paperfigs/ColoredPointsByGradient-Axis=" + axis + "-NumCircle=" + str(number_of_circles)) + ".png"
print(file_name)
plt.savefig(file_name, dpi=300)
plt.close(fig)
print("Finished ColoredPointsByGradient in " + str(time.time() - t))
# def plot_demo_for_activity_black_white_signal(image_str):
# t = time.time()
# origins, colors = image_to_activity_points("/images/" + image_str, resolution=100)
# # origins = add_jitter_to_points(origins, .25)
# fig, ax = plt.subplots(1, figsize=(3, 3))
# ax.scatter([p[0] for p in origins], [p[1] for p in origins], c=colors, s=6)
# ax.set_xlim([0, 100.0])
# ax.set_ylim([0, 100.0])
# ax.set_aspect(1)
# plt.tight_layout()
# plt.savefig(generate_tex_friendly_filename("paperfigs/GrayscalePointsByActivity-" + image_str[:-4]) + ".png", dpi=300)
# print("Finished GrayscalePointsByActivity-" + image_str[:-4] + " in " + str(time.time() - t))
if __name__ == '__main__':
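    # Each demo figure is rendered in its own multiprocessing.Process below, so
    # the plots are generated in parallel rather than one after another.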
origin = np.asarray([-2.5, -2.5])
end = np.asarray([0.0, 0.0])
target = np.asarray([3.0, 8.0])
p1 = Process(target=plot_axon_growth_algorithm, args=(origin, end, target,))
p1.start()
print("Started P1")
origin = np.asarray([-2.5, -2.5])
end = np.asarray([0.0, 0.0])
target = np.asarray([-4.0, -3.0])
p2 = Process(target=plot_axon_growth_algorithm, args=(origin, end, target,))
p2.start()
print("Started P2")
origin = np.asarray([0.0, 0.0])
end = np.asarray([12.0, 12.0])
p3 = Process(target=plot_demo_for_synapse_growth, args=(origin, end, 24, .2, 8,))
p3.start()
print("Started P3")
origin = np.asarray([0.0, 0.0])
end = np.asarray([12.0, 12.0])
p4 = Process(target=plot_demo_for_synapse_growth, args=(origin, end, 82, .2, 8,))
p4.start()
print("Started P4")
p5 = Process(target=plot_demo_for_colouring_circles_by_gradients, args=())
p5.start()
print("Started P5")
# image_str = "chaplin.jpg"
# p6 = Process(target=plot_demo_for_activity_black_white_signal, args=(image_str,))
# p6.start()
# print("Started P6")
| mit |
jbogaardt/chainladder-python | examples/plot_triangle_from_pandas.py | 1 | 1318 | """
=======================
Basic Triangle Creation
=======================
This example demonstrates the typical way you'd ingest data into a Triangle.
Data in tabular form in a pandas DataFrame is required. At a minimum, columns
specifying origin and development, and a value must be present. Note, you can
include more than one column as a list as well as any number of indices for
creating triangle subgroups.
In this example, we create a triangle object with triangles for each company
in the CAS Loss Reserve Database for Workers' Compensation.
"""
import chainladder as cl
import pandas as pd
# Read in the data
data = pd.read_csv(r'https://raw.githubusercontent.com/casact/chainladder-python/master/chainladder/utils/data/clrd.csv')
# Create a triangle
triangle = cl.Triangle(
data, origin='AccidentYear', development='DevelopmentYear',
index=['GRNAME'], columns=['IncurLoss','CumPaidLoss','EarnedPremDIR'])
# Output
print('Raw data:')
print(data.head())
print()
print('Triangle summary:')
print(triangle)
print()
print('Aggregate Paid Triangle:')
print(triangle['CumPaidLoss'].sum())
# Plot data
triangle['CumPaidLoss'].sum().T.plot(
marker='.', grid=True,
title='CAS Loss Reserve Database: Workers Compensation').set(
xlabel='Development Period', ylabel='Cumulative Paid Loss');
| mit |
gams/stateair_stats | data_stats.py | 1 | 6417 | # -*- coding: utf-8 -*-
import argparse
import datetime
import glob
import os
import os.path
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
DATA_FILES = [
'_data/shanghai/Shanghai_2011_HourlyPM25_created20140423.csv',
'_data/shanghai/Shanghai_2012_HourlyPM25_created20140423.csv',
'_data/shanghai/Shanghai_2013_HourlyPM25_created20140423.csv',
'_data/shanghai/Shanghai_2014_HourlyPM25_created20150203.csv',
'_data/shanghai/Shanghai_2015_HourlyPM25_created20150901.csv',
]
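# The breakpoints below are the US EPA 24-hour PM2.5 AQI category bounds in
# micrograms per cubic metre; readings above 500.4 fall through to the
# 'out_of_scale' bucket in src_process(). (Descriptive comment added for clarity.)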
EPA_BP = {
'good': (0, 12),
'moderate': (12.1, 35.4),
'unhealthy_sensitive': (35.5, 55.4),
'unhealthy': (55.5, 150.4),
'very_unhealthy': (150.5, 250.4),
'hazardous': (250.5, 500.4),
}
DATAROOT = '_data'
DEFAULT_CITY = 'shanghai'
AQI_BP = EPA_BP
START_YEAR = -1
END_YEAR = -1
def src_process(data, source):
"""Process a CSV file from stateair, return month-ly aggregated data with
counts for each AQI level.
.. warning:: undefined data is simply dropped
"""
df = pd.read_csv(source, usecols=[2, 7, 10], index_col=0,
parse_dates=True,
na_values=['-999', '-1']).dropna()
for chunk in df.itertuples():
ts = chunk[0]
pm25 = chunk[1]
monthdt = datetime.date(ts.year, ts.month, 1)
if monthdt not in data:
data[monthdt] = {
'good': 0,
'moderate': 0,
'unhealthy_sensitive': 0,
'unhealthy': 0,
'very_unhealthy': 0,
'hazardous': 0,
'out_of_scale': 0,
}
match = False
for (name, bp) in AQI_BP.items():
if pm25 >= bp[0] and pm25 <= bp[1]:
match = True
data[monthdt][name] += 1
break
if match is False:
data[monthdt]['out_of_scale'] += 1
def year_range(data):
y_range = []
for date in data.keys():
if date.year not in y_range:
y_range.append(date.year)
y_range.sort()
return y_range
def get_datasets(data):
datasets = {}
for year in year_range(data):
if year not in datasets:
datasets[year] = {
'months': [],
'good': [],
'moderate': [],
'unhealthy_sensitive': [],
'unhealthy': [],
'very_unhealthy': [],
'hazardous': [],
'out_of_scale': [],
}
for month in range(1, 13):
date = datetime.date(year, month, 1)
if date in data:
datasets[year]['months'].append(month)
datasets[year]['good'].append(data[date]['good'])
datasets[year]['moderate'].append(data[date]['moderate'])
datasets[year]['unhealthy_sensitive'].append(data[date]['unhealthy_sensitive'])
datasets[year]['unhealthy'].append(data[date]['unhealthy'])
datasets[year]['very_unhealthy'].append(data[date]['very_unhealthy'])
datasets[year]['hazardous'].append(data[date]['hazardous'])
datasets[year]['out_of_scale'].append(data[date]['out_of_scale'])
else:
datasets[year]['months'].append(month)
datasets[year]['good'].append(0)
datasets[year]['moderate'].append(0)
datasets[year]['unhealthy_sensitive'].append(0)
datasets[year]['unhealthy'].append(0)
datasets[year]['very_unhealthy'].append(0)
datasets[year]['hazardous'].append(0)
datasets[year]['out_of_scale'].append(0)
return datasets
def plot_stacked_bars(data, city):
datasets = get_datasets(data)
N = 12
width = 0.9 / len(datasets.keys())
fig, ax = plt.subplots()
offset = 0
for year in datasets:
ind = np.arange(N) + 0.05 + width * offset
ax.bar(ind, datasets[year]['good'], width, color='#00e400')
btm = np.array(datasets[year]['good'])
ax.bar(ind, datasets[year]['moderate'], width, bottom=btm,
color='#ffff00')
btm += np.array(datasets[year]['moderate'])
ax.bar(ind, datasets[year]['unhealthy_sensitive'], width,
bottom=btm, color='#ff7e00')
btm += np.array(datasets[year]['unhealthy_sensitive'])
ax.bar(ind, datasets[year]['unhealthy'], width, bottom=btm,
color='#ff0000')
btm += np.array(datasets[year]['unhealthy'])
ax.bar(ind, datasets[year]['very_unhealthy'], width, bottom=btm,
color='#99004c')
btm += np.array(datasets[year]['very_unhealthy'])
ax.bar(ind, datasets[year]['hazardous'], width, bottom=btm,
color='#7e0023')
btm += np.array(datasets[year]['hazardous'])
ax.bar(ind, datasets[year]['out_of_scale'], width, bottom=btm,
color='#000000')
offset += 1
ax.set_ylabel(u'PM2.5 (µg/m³)')
    dates = sorted(datasets)
ax.set_title('Stateair PM2.5 concentration from {} to {} ({})'.format(
dates[0], dates[-1], city))
ax.set_xticks(np.arange(N) + 0.05 + width * 2)
ax.set_xticklabels( ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec') )
if __name__ == '__main__':
if os.path.isdir(DATAROOT) is False:
sys.stderr.write("data folder not found ({})\n".format(DATAROOT))
sys.exit(1)
parser = argparse.ArgumentParser(description='stateair stats')
parser.add_argument('city', nargs='?', default=DEFAULT_CITY,
help='city to process')
args = parser.parse_args()
datafolder = os.path.join(DATAROOT, args.city.lower())
if os.path.isdir(datafolder) is False:
sys.stderr.write("city data folder not found ({})\n".format(datafolder))
sys.exit(1)
datasources = glob.glob(os.path.join(datafolder, '*.csv'))
if len(datasources) == 0:
sys.stdout.write("no CSV file found in the data folder ({})\n".format(datafolder))
sys.exit(0)
data = {}
for source in datasources:
src_process(data, source)
plot_stacked_bars(data, args.city)
plt.savefig(os.path.join(DATAROOT, 'stateair-{}-{}.png'.format(
args.city.lower(),
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))))
| bsd-3-clause |
chrsrds/scikit-learn | examples/compose/plot_column_transformer_mixed_types.py | 6 | 3795 | """
===================================
Column Transformer with Mixed Types
===================================
This example illustrates how to apply different preprocessing and
feature extraction pipelines to different subsets of features,
using :class:`sklearn.compose.ColumnTransformer`.
This is particularly handy for the case of datasets that contain
heterogeneous data types, since we may want to scale the
numeric features and one-hot encode the categorical ones.
In this example, the numeric data is standard-scaled after
mean-imputation, while the categorical data is one-hot
encoded after imputing missing values with a new category
(``'missing'``).
Finally, the preprocessing pipeline is integrated in a
full prediction pipeline using :class:`sklearn.pipeline.Pipeline`,
together with a simple classification model.
"""
# Author: Pedro Morales <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
np.random.seed(0)
# Load data from https://www.openml.org/d/40945
X, y = fetch_openml("titanic", version=1, as_frame=True, return_X_y=True)
# Alternatively X and y can be obtained directly from the frame attribute:
# X = titanic.frame.drop('survived', axis=1)
# y = titanic.frame['survived']
# We will train our classifier with the following features:
# Numeric Features:
# - age: float.
# - fare: float.
# Categorical Features:
# - embarked: categories encoded as strings {'C', 'S', 'Q'}.
# - sex: categories encoded as strings {'female', 'male'}.
# - pclass: ordinal integers {1, 2, 3}.
# We create the preprocessing pipelines for both numeric and categorical data.
numeric_features = ['age', 'fare']
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = ['embarked', 'sex', 'pclass']
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
# Append classifier to preprocessing pipeline.
# Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LogisticRegression())])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf.fit(X_train, y_train)
print("model score: %.3f" % clf.score(X_test, y_test))
###############################################################################
# Using the prediction pipeline in a grid search
###############################################################################
# Grid search can also be performed on the different preprocessing steps
# defined in the ``ColumnTransformer`` object, together with the classifier's
# hyperparameters as part of the ``Pipeline``.
# We will search for both the imputer strategy of the numeric preprocessing
# and the regularization parameter of the logistic regression using
# :class:`sklearn.model_selection.GridSearchCV`.
param_grid = {
'preprocessor__num__imputer__strategy': ['mean', 'median'],
'classifier__C': [0.1, 1.0, 10, 100],
}
grid_search = GridSearchCV(clf, param_grid, cv=10)
grid_search.fit(X_train, y_train)
print(("best logistic regression from grid search: %.3f"
% grid_search.score(X_test, y_test)))
| bsd-3-clause |
alexherns/biotite-scripts | build_connection_graph.py | 1 | 1780 | #!/usr/bin/env python2.7
import networkx as nx
import matplotlib.pyplot as plt
import sys, argparse, os, re
parser = argparse.ArgumentParser(description='''Visualizes connections in assembly
using networkx_viewer module.''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False)
#Required arguments
required = parser.add_argument_group('REQUIRED')
required.add_argument('-c', help= 'connections file', required=True, type=argparse.FileType('r'))
#Optional arguments
optional = parser.add_argument_group('OPTIONAL')
optional.add_argument('-h', action="help", help="show this help message and exit")
optional.add_argument('-o', metavar='<*.png>', type=argparse.FileType('w'))
optional.add_argument('-m', metavar='<int>', type=int, default=0)
args = parser.parse_args()
import networkx_viewer as nv
#Build the graph
G= nx.Graph()
nodes= []
edges= {}
for line in args.c:
line= line.strip().split('\t')
if 'accept' not in line or 'flanking' in line:
continue
attr= {}
#line[0]= re.search('(NODE_\d+)', line[0]).group()
#line[2]= re.search('(NODE_\d+)', line[2]).group()
if line[0]==line[2]:
nodes.append([line[0], {'self':'True', 'fill':'blue', 'direction':" ".join(line[:4]), 'count':line[4]}])
print line[0]+"\tSelf-edge"
continue
if line[0] not in nodes:
nodes.append(line[0])
if line[2] not in nodes:
nodes.append(line[2])
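    # Connections are keyed by the sorted node pair so that A->B and B->A
    # collapse onto a single undirected edge (descriptive comment).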
edge= sorted([line[0], line[2]])
lookup= "\t".join(edge)
if lookup in edges:
continue
if 'mid' in [line[1], line[3]]:
attr= {'fill':'red'}
attr['direction']= " ".join(line[:4])
attr['count']= line[4]
if int(attr['count'])<args.m:
continue
edge.append(attr)
edges[lookup]= edge
G.add_nodes_from(nodes)
G.add_edges_from(edges.values())
#Draw the graph
app= nv.Viewer(G)
app.mainloop()
| mit |
ChanderG/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
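# Descriptive note: dist_func below is the user-supplied callable exercised
# through the "pyfunc" metric in test_pyfunc_metric; it computes a
# Minkowski-style p-norm (no absolute value is taken, which is adequate for
# the p=2 case used in that test).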
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
pxsdirac/tushare | tushare/stock/macro.py | 37 | 12728 | # -*- coding:utf-8 -*-
"""
Macroeconomic data interface
Created on 2015/01/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
import numpy as np
import re
import json
from tushare.stock import macro_vars as vs
from tushare.stock import cons as ct
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_gdp_year():
"""
        Fetch annual gross domestic product (GDP) data
    Return
    --------
    DataFrame
        year :statistical year
        gdp :gross domestic product (100 million CNY)
        pc_gdp :GDP per capita (CNY)
        gnp :gross national product (100 million CNY)
        pi :primary industry (100 million CNY)
        si :secondary industry (100 million CNY)
        industry :industry (100 million CNY)
        cons_industry :construction (100 million CNY)
        ti :tertiary industry (100 million CNY)
        trans_industry :transport, storage, post and telecommunications (100 million CNY)
        lbdy :wholesale, retail trade and catering (100 million CNY)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 0, 70,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_YEAR_COLS)
df[df==0] = np.NaN
return df
def get_gdp_quarter():
"""
        Fetch quarterly gross domestic product (GDP) data
    Return
    --------
    DataFrame
        quarter :quarter
        gdp :gross domestic product (100 million CNY)
        gdp_yoy :GDP year-on-year growth (%)
        pi :value added of the primary industry (100 million CNY)
        pi_yoy :value added of the primary industry, year-on-year growth (%)
        si :value added of the secondary industry (100 million CNY)
        si_yoy :value added of the secondary industry, year-on-year growth (%)
        ti :value added of the tertiary industry (100 million CNY)
        ti_yoy :value added of the tertiary industry, year-on-year growth (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 1, 250,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_QUARTER_COLS)
df['quarter'] = df['quarter'].astype(object)
df[df==0] = np.NaN
return df
def get_gdp_for():
"""
        Fetch data on the contribution of the three major demand components to GDP
    Return
    --------
    DataFrame
        year :statistical year
        end_for :contribution rate of final consumption expenditure (%)
        for_rate :GDP growth pulled by final consumption expenditure (percentage points)
        asset_for :contribution rate of gross capital formation (%)
        asset_rate :GDP growth pulled by gross capital formation (percentage points)
        goods_for :contribution rate of net exports of goods and services (%)
        goods_rate :GDP growth pulled by net exports of goods and services (percentage points)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 4, 80, rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"','').replace('null','0')
js = json.loads(datastr)
df = pd.DataFrame(js,columns=vs.GDP_FOR_COLS)
df[df==0] = np.NaN
return df
def get_gdp_pull():
"""
        Fetch data on how much each of the three major industries pulls GDP growth
    Return
    --------
    DataFrame
        year :statistical year
        gdp_yoy :GDP year-on-year growth (%)
        pi :pull rate of the primary industry (%)
        si :pull rate of the secondary industry (%)
        industry :of which, pull of industry (%)
        ti :pull rate of the tertiary industry (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 5, 60, rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_PULL_COLS)
df[df==0] = np.NaN
return df
def get_gdp_contrib():
"""
        Fetch contribution-rate data for the three major industries
    Return
    --------
    DataFrame
        year :statistical year
        gdp_yoy :gross domestic product
        pi :contribution rate of the primary industry (%)
        si :contribution rate of the secondary industry (%)
        industry :of which, contribution rate of industry (%)
        ti :contribution rate of the tertiary industry (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], rdint,
vs.MACRO_TYPE[0], 6, 60, rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_CONTRIB_COLS)
df[df==0] = np.NaN
return df
def get_cpi():
"""
        Fetch consumer price index (CPI) data
    Return
    --------
    DataFrame
        month :statistical month
        cpi :price index
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[1], 0, 600,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.CPI_COLS)
df['cpi'] = df['cpi'].astype(float)
return df
def get_ppi():
"""
        Fetch producer price index (ex-factory prices of industrial products) data
    Return
    --------
    DataFrame
        month :statistical month
        ppiip :ex-factory price index of industrial products
        ppi :price index of means of production
        qm :price index of the mining and quarrying industry
        rmi :price index of the raw materials industry
        pi :price index of the processing industry
        cg :price index of means of subsistence (consumer goods)
        food :food price index
        clothing :clothing price index
        roeu :price index of articles for daily use
        dcg :price index of durable consumer goods
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[1], 3, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.PPI_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, np.NaN, x))
if i != 'month':
df[i] = df[i].astype(float)
return df
def get_deposit_rate():
"""
        Fetch deposit interest rate data
    Return
    --------
    DataFrame
        date :date the rate changed
        deposit_type :type of deposit
        rate :interest rate (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 2, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.DEPOSIT_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_loan_rate():
"""
        Fetch loan (lending) interest rate data
    Return
    --------
    DataFrame
        date :effective date
        loan_type :type of loan
        rate :interest rate (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 3, 800,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.LOAN_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_rrr():
"""
        Fetch required reserve ratio data
    Return
    --------
    DataFrame
        date :change date
        before :required reserve ratio before the adjustment (%)
        now :required reserve ratio after the adjustment (%)
        changed :magnitude of the adjustment (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 4, 100,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.RRR_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_money_supply():
"""
        Fetch money supply data
    Return
    --------
    DataFrame
        month :statistical month
        m2 :money and quasi-money (broad money M2) (100 million CNY)
        m2_yoy :M2 year-on-year growth (%)
        m1 :money (narrow money M1) (100 million CNY)
        m1_yoy :M1 year-on-year growth (%)
        m0 :cash in circulation (M0) (100 million CNY)
        m0_yoy :M0 year-on-year growth (%)
        cd :demand deposits (100 million CNY)
        cd_yoy :demand deposits year-on-year growth (%)
        qm :quasi-money (100 million CNY)
        qm_yoy :quasi-money year-on-year growth (%)
        ftd :time deposits (100 million CNY)
        ftd_yoy :time deposits year-on-year growth (%)
        sd :savings deposits (100 million CNY)
        sd_yoy :savings deposits year-on-year growth (%)
        rests :other deposits (100 million CNY)
        rests_yoy :other deposits year-on-year growth (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 1, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.MONEY_SUPPLY_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_money_supply_bal():
"""
        Fetch money supply data (year-end balances)
    Return
    --------
    DataFrame
        year :statistical year
        m2 :money and quasi-money (100 million CNY)
        m1 :money (100 million CNY)
        m0 :cash in circulation (100 million CNY)
        cd :demand deposits (100 million CNY)
        qm :quasi-money (100 million CNY)
        ftd :time deposits (100 million CNY)
        sd :savings deposits (100 million CNY)
        rests :other deposits (100 million CNY)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 0, 200,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.MONEY_SUPPLY_BLA_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
| bsd-3-clause |
jjhelmus/wradlib | examples/adjust_example.py | 1 | 9055 | # -------------------------------------------------------------------------------
# Name: adjust_example.py
# Purpose:
#
# Author: heistermann
#
# Created: 28.10.2011
# Copyright: (c) heistermann 2011
# Licence: The MIT License
# -------------------------------------------------------------------------------
#!/usr/bin/env python
import wradlib.adjust as adjust
import wradlib.verify as verify
import wradlib.util as util
import numpy as np
import matplotlib.pyplot as pl
#pl.interactive(True)
def ex_adjust():
###########################################################################
# 1d Example ##############################################################
###########################################################################
# gage and radar coordinates
obs_coords = np.array([5, 10, 15, 20, 30, 45, 65, 70, 77, 90])
radar_coords = np.arange(0, 101)
# true rainfall
truth = np.abs(1.5 + np.sin(0.075 * radar_coords)) + np.random.uniform(-0.1, 0.1, len(radar_coords))
# radar error
erroradd = 0.7 * np.sin(0.2 * radar_coords + 10.)
errormult = 0.75 + 0.015 * radar_coords
noise = np.random.uniform(-0.05, 0.05, len(radar_coords))
# radar observation
radar = errormult * truth + erroradd + noise
# gage observations are assumed to be perfect
obs = truth[obs_coords]
# add a missing value to observations (just for testing)
obs[1] = np.nan
# number of neighbours to be used
nnear_raws = 3
# adjust the radar observation by additive model
add_adjuster = adjust.AdjustAdd(obs_coords, radar_coords, nnear_raws=nnear_raws)
add_adjusted = add_adjuster(obs, radar)
# adjust the radar observation by multiplicative model
mult_adjuster = adjust.AdjustMultiply(obs_coords, radar_coords, nnear_raws=nnear_raws)
mult_adjusted = mult_adjuster(obs, radar)
# adjust the radar observation by MFB
mfb_adjuster = adjust.AdjustMFB(obs_coords, radar_coords, nnear_raws=nnear_raws)
mfb_adjusted = mfb_adjuster(obs, radar)
# adjust the radar observation by AdjustMixed
mixed_adjuster = adjust.AdjustMixed(obs_coords, radar_coords, nnear_raws=nnear_raws)
mixed_adjusted = mixed_adjuster(obs, radar)
# plotting
pl.plot(radar_coords, radar, 'k-', label="Unadjusted radar", linewidth=2., linestyle="dashed")
pl.xlabel("Distance (km)")
pl.ylabel("Rainfall intensity (mm/h)")
pl.plot(radar_coords, truth, 'k-', label="True rainfall", linewidth=2.)
pl.plot(obs_coords, obs, 'o', label="Gage observation", markersize=10.0, markerfacecolor="grey")
pl.plot(radar_coords, add_adjusted, '-', color="red", label="Additive adjustment")
pl.plot(radar_coords, mult_adjusted, '-', color="green", label="Multiplicative adjustment")
pl.plot(radar_coords, mfb_adjusted, '-', color="orange", label="Mean Field Bias adjustment")
pl.plot(radar_coords, mixed_adjusted, '-', color="blue", label="Mixed (mult./add.) adjustment")
pl.legend(prop={'size': 12})
pl.show()
# Verification for this example
rawerror = verify.ErrorMetrics(truth, radar)
mfberror = verify.ErrorMetrics(truth, mfb_adjusted)
adderror = verify.ErrorMetrics(truth, add_adjusted)
multerror = verify.ErrorMetrics(truth, mult_adjusted)
mixerror = verify.ErrorMetrics(truth, mixed_adjusted)
# Verification reports
maxval = 4.
fig = pl.figure(figsize=(14, 8))
ax = fig.add_subplot(231, aspect=1.)
rawerror.report(ax=ax, unit="mm", maxval=maxval)
ax.text(0.2, 0.9 * maxval, "Unadjusted radar")
ax = fig.add_subplot(232, aspect=1.)
adderror.report(ax=ax, unit="mm", maxval=maxval)
ax.text(0.2, 0.9 * maxval, "Additive adjustment")
ax = fig.add_subplot(233, aspect=1.)
multerror.report(ax=ax, unit="mm", maxval=maxval)
ax.text(0.2, 0.9 * maxval, "Multiplicative adjustment")
ax = fig.add_subplot(234, aspect=1.)
mixerror.report(ax=ax, unit="mm", maxval=maxval)
ax.text(0.2, 0.9 * maxval, "Mixed (mult./add.) adjustment")
mixerror.report(ax=ax, unit="mm", maxval=maxval)
ax = fig.add_subplot(235, aspect=1.)
mfberror.report(ax=ax, unit="mm", maxval=maxval)
ax.text(0.2, 0.9 * maxval, "Mean Field Bias adjustment")
pl.show()
###########################################################################
# 2d Example ##############################################################
###########################################################################
# a) CREATE SYNTHETIC DATA ------------------------------------------------
# grid axes
xgrid = np.arange(0, 10)
ygrid = np.arange(20, 30)
# number of observations
num_obs = 10
# create grid
gridshape = len(xgrid), len(ygrid)
grid_coords = util.gridaspoints(ygrid, xgrid)
# Synthetic true rainfall
truth = np.abs(10. * np.sin(0.1 * grid_coords).sum(axis=1))
# Creating radar data by perturbing truth with multiplicative and additive error
# YOU CAN EXPERIMENT WITH THE ERROR STRUCTURE
radar = 0.6 * truth + 1. * np.random.uniform(low=-1., high=1, size=len(truth))
radar[radar < 0.] = 0.
# indices for creating obs from raw (random placement of gauges)
obs_ix = np.random.uniform(low=0, high=len(grid_coords), size=num_obs).astype('i4')
# creating obs_coordinates
obs_coords = grid_coords[obs_ix]
# creating gauge observations from truth
obs = truth[obs_ix]
# b) GAUGE ADJUSTMENT -----------------------------------------------------
# Mean Field Bias Adjustment
mfbadjuster = adjust.AdjustMFB(obs_coords, grid_coords)
mfbadjusted = mfbadjuster(obs, radar)
# Additive Error Model
addadjuster = adjust.AdjustAdd(obs_coords, grid_coords)
addadjusted = addadjuster(obs, radar)
# Multiplicative Error Model
multadjuster = adjust.AdjustMultiply(obs_coords, grid_coords)
multadjusted = multadjuster(obs, radar)
# c) PLOTTING
# Maximum value (used for normalisation of colorscales)
maxval = np.max(np.concatenate((truth, radar, obs, addadjusted)).ravel())
# Helper functions for repeated plotting tasks
def scatterplot(x, y, title):
"""Quick and dirty helper function to produce scatter plots
"""
pl.scatter(x, y)
pl.plot([0, 1.2 * maxval], [0, 1.2 * maxval], '-', color='grey')
pl.xlabel("True rainfall (mm)")
pl.ylabel("Estimated rainfall (mm)")
pl.xlim(0, maxval + 0.1 * maxval)
pl.ylim(0, maxval + 0.1 * maxval)
pl.title(title)
def gridplot(data, title):
"""Quick and dirty helper function to produce a grid plot
"""
xplot = np.append(xgrid, xgrid[-1] + 1.) - 0.5
yplot = np.append(ygrid, ygrid[-1] + 1.) - 0.5
grd = ax.pcolormesh(xplot, yplot, data.reshape(gridshape), vmin=0, vmax=maxval)
ax.scatter(obs_coords[:, 0], obs_coords[:, 1], c=obs.ravel(), marker='s', s=50, vmin=0, vmax=maxval)
pl.colorbar(grd, shrink=0.7)
pl.title(title)
# open figure
fig = pl.figure(figsize=(10, 10))
# True rainfall
ax = fig.add_subplot(331, aspect='equal')
gridplot(truth, 'True rainfall')
# Unadjusted radar rainfall
ax = fig.add_subplot(332, aspect='equal')
gridplot(radar, 'Radar rainfall')
# Scatter plot radar vs. observations
ax = fig.add_subplot(333, aspect='equal')
scatterplot(truth, radar, 'Radar vs. Truth (red: Gauges)')
pl.plot(obs, radar[obs_ix], linestyle="None", marker="o", color="red")
# Adjusted radar rainfall (MFB)
ax = fig.add_subplot(334, aspect='equal')
gridplot(mfbadjusted, 'Adjusted (MFB)')
# Adjusted radar rainfall (additive)
ax = fig.add_subplot(335, aspect='equal')
gridplot(addadjusted, 'Adjusted (Add.)')
# Adjusted radar rainfall (multiplicative)
ax = fig.add_subplot(336, aspect='equal')
gridplot(multadjusted, 'Adjusted (Mult.)')
# Adjusted (MFB) vs. radar (for control purposes)
ax = fig.add_subplot(337, aspect='equal')
# scatterplot(obs, mfbadjusted[obs_ix], 'Adjusted (MFB) vs. Gauges\n(no x-validation!)')
scatterplot(truth, mfbadjusted, 'Adjusted (MFB) vs. Truth')
# Adjusted (Add) vs. radar (for control purposes)
ax = fig.add_subplot(338, aspect='equal')
# scatterplot(obs, addadjusted[obs_ix], 'Adjusted (Add.) vs. Gauges\n(no x-validation!)')
scatterplot(truth, addadjusted, 'Adjusted (Add.) vs. Truth')
# Adjusted (Mult.) vs. radar (for control purposes)
ax = fig.add_subplot(339, aspect='equal')
# scatterplot(obs, multadjusted[obs_ix], 'Adjusted (Mult.) vs. Gauges\n(no x-validation!)')
scatterplot(truth, multadjusted, 'Adjusted (Mult.) vs. Truth')
pl.tight_layout()
pl.show()
if __name__ == '__main__':
ex_adjust()
| mit |
debugger22/sympy | sympy/interactive/tests/test_ipythonprinting.py | 27 | 5891 | """Tests that the IPython printing module is properly loaded. """
from sympy.core.compatibility import u
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
from sympy.utilities.pytest import raises
# run_cell was added in IPython 0.11
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] == "pi"
assert app.user_ns['a2']['text/plain'] == "pi**2"
else:
assert app.user_ns['a'][0]['text/plain'] == "pi"
assert app.user_ns['a2'][0]['text/plain'] == "pi**2"
# Load printing extension
app.run_cell("from sympy import init_printing")
app.run_cell("init_printing()")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
assert app.user_ns['a2']['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
else:
assert app.user_ns['a'][0]['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
assert app.user_ns['a2'][0]['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
def test_print_builtin_option():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
app.run_cell("from sympy import init_printing")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note : In Python 3 the text is unicode, but in 2 it is a string.
# XXX: How can we make this ignore the terminal width? This test fails if
# the terminal is too narrow.
assert text in ("{pi: 3.14, n_i: 3}",
u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
"{n_i: 3, pi: 3.14}",
u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
    # If we enable the default printing, then the dictionary should render
# as a LaTeX version of the whole dict: ${\pi: 3.14, n_i: 3}$
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
latex = app.user_ns['a']['text/latex']
else:
text = app.user_ns['a'][0]['text/plain']
latex = app.user_ns['a'][0]['text/latex']
assert text in ("{pi: 3.14, n_i: 3}",
u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
"{n_i: 3, pi: 3.14}",
u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
assert latex == r'$$\left \{ n_{i} : 3, \quad \pi : 3.14\right \}$$'
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True, print_builtin=False)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note : In Python 3 the text is unicode, but in 2 it is a string.
# Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}'
# Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}'
# Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}'
assert text in ("{pi: 3.14, n_i: 3}", "{n_i: 3, pi: 3.14}")
def test_matplotlib_bad_latex():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("import IPython")
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import init_printing, Matrix")
app.run_cell("init_printing(use_latex='matplotlib')")
# The png formatter is not enabled by default in this context
app.run_cell("inst.display_formatter.formatters['image/png'].enabled = True")
# Make sure no warnings are raised by IPython
app.run_cell("import warnings")
app.run_cell("warnings.simplefilter('error', IPython.core.formatters.FormatterWarning)")
# This should not raise an exception
app.run_cell("a = format(Matrix([1, 2, 3]))")
| bsd-3-clause |
sauliusl/dgw | setup.py | 2 | 2090 | from setuptools import setup, Extension
import os
try:
from Cython.Build import cythonize
except ImportError:
cython_supported = False
else:
cython_supported = True
import numpy
np_lib = os.path.dirname(numpy.__file__)
np_inc = [os.path.join(np_lib, 'core/include')]
cmdclass = {}
# Compile packages from mlpy distribution
if cython_supported:
ext_modules = [Extension("dgw._mlpy.dtw",
["mlpy_src/dtw/cdtw.c",
"mlpy_src/dtw/dtw.pyx"],
include_dirs=np_inc)]
ext_modules = cythonize(ext_modules)
else:
ext_modules = [Extension("dgw._mlpy.dtw",
["mlpy_src/dtw/cdtw.c",
"mlpy_src/dtw/dtw.c"],
include_dirs=np_inc)]
setup(
name='dgw',
version='0.1.1',
packages=['dgw', 'dgw.bin', 'dgw.cli', 'dgw.cluster', 'dgw.data', 'dgw.data.parsers', 'dgw.data.visualisation', 'dgw._mlpy',
'dgw.dtw', 'dgw.tests.data.parsers', 'dgw.tests.data', 'dgw.tests.dtw', 'dgw.tests', 'dgw'],
install_requires=['argparse',
'numpy>=1.6.1', 'scipy>=0.9.0', 'pandas>=0.10.1', 'pysam>=0.7.4',
'fastcluster>=1.1.7'],
extras_require ={
'visualisation': ['matplotlib>= 1.1.0',
'palettable>=2.1.1',
'seaborn>=0.7.1']
},
entry_points={
'console_scripts': [
'dgw-extract-gene-regions = dgw.bin.extract_gene_regions:main',
'dgw-overlaps2poi = dgw.bin.overlaps2poi:main',
'dgw-prototypes2dot = dgw.bin.prototypes2dot:main [visualisation]',
'dgw-worker = dgw.bin.worker:main'
],
'gui_scripts': [
'dgw-explorer = dgw.bin.explorer:main [visualisation]',
]
},
ext_modules=ext_modules,
url='http://sauliusl.github.com/dgw/',
license='GPLv3',
author='Saulius Lukauskas',
author_email='[email protected]',
description='Dynamic Genome Warping',
cmdclass=cmdclass,
)
| gpl-3.0 |
alekz112/statsmodels | statsmodels/datasets/macrodata/data.py | 25 | 3184 | """United States Macroeconomic data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
Compiled by Skipper Seabold. All data are from the Federal Reserve Bank of St.
Louis [1] except the unemployment rate which was taken from the National
Bureau of Labor Statistics [2]. ::
[1] Data Source: FRED, Federal Reserve Economic Data, Federal Reserve Bank of
St. Louis; http://research.stlouisfed.org/fred2/; accessed December 15,
2009.
[2] Data Source: Bureau of Labor Statistics, U.S. Department of Labor;
http://www.bls.gov/data/; accessed December 15, 2009.
"""
DESCRSHORT = """US Macroeconomic Data for 1959Q1 - 2009Q3"""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of Observations - 203
Number of Variables - 14
Variable name definitions::
year - 1959q1 - 2009q3
quarter - 1-4
realgdp - Real gross domestic product (Bil. of chained 2005 US$,
seasonally adjusted annual rate)
realcons - Real personal consumption expenditures (Bil. of chained
2005 US$, seasonally adjusted annual rate)
realinv - Real gross private domestic investment (Bil. of chained
2005 US$, seasonally adjusted annual rate)
realgovt - Real federal consumption expenditures & gross investment
(Bil. of chained 2005 US$, seasonally adjusted annual rate)
realdpi - Real private disposable income (Bil. of chained 2005
US$, seasonally adjusted annual rate)
cpi - End of the quarter consumer price index for all urban
consumers: all items (1982-84 = 100, seasonally adjusted).
m1 - End of the quarter M1 nominal money stock (Seasonally
adjusted)
tbilrate - Quarterly monthly average of the monthly 3-month
treasury bill: secondary market rate
unemp - Seasonally adjusted unemployment rate (%)
pop - End of the quarter total population: all ages incl. armed
forces over seas
infl - Inflation rate (ln(cpi_{t}/cpi_{t-1}) * 400)
realint - Real interest rate (tbilrate - infl)
"""
from numpy import recfromtxt, column_stack, array
from pandas import DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
"""
Load the US macro data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
The macrodata Dataset instance does not contain endog and exog attributes.
"""
data = _get_data()
names = data.dtype.names
dataset = Dataset(data=data, names=names)
return dataset
def load_pandas():
dataset = load()
dataset.data = DataFrame(dataset.data)
return dataset
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/macrodata.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
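# Illustrative usage (not part of the original module): a hedged sketch of how
# the loaders above might be exercised. Column names follow the NOTE block.
if __name__ == "__main__":
    macro_dataset = load_pandas()
    frame = macro_dataset.data
    print(frame[["year", "quarter", "realgdp", "infl"]].head())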
| bsd-3-clause |
sssllliang/edx-analytics-pipeline | edx/analytics/tasks/reports/enrollments.py | 2 | 11885 | """Enrollment related reports"""
import csv
from datetime import timedelta, date
import luigi
import luigi.hdfs
import numpy
import pandas
from edx.analytics.tasks.util.tsv import read_tsv
from edx.analytics.tasks.url import ExternalURL, get_target_from_url, url_path_join
from edx.analytics.tasks.course_enroll import CourseEnrollmentChangesPerDay
from edx.analytics.tasks.mapreduce import MapReduceJobTaskMixin
from edx.analytics.tasks.util.opaque_key_util import get_org_id_for_course
DEFAULT_NUM_WEEKS = 52
DEFAULT_NUM_DAYS = 28
class CourseEnrollmentCountMixin(MapReduceJobTaskMixin):
""" Provides common parameters used in executive report tasks """
name = luigi.Parameter()
src = luigi.Parameter(
is_list=True,
config_path={'section': 'enrollment-reports', 'name': 'src'},
)
include = luigi.Parameter(is_list=True, default=('*',))
weeks = luigi.IntParameter(default=DEFAULT_NUM_WEEKS)
days = luigi.Parameter(default=DEFAULT_NUM_DAYS)
offsets = luigi.Parameter(default=None)
history = luigi.Parameter(default=None)
date = luigi.DateParameter(default=date.today())
statuses = luigi.Parameter(default=None)
manifest = luigi.Parameter(default=None)
manifest_path = luigi.Parameter(default=None)
destination_directory = luigi.Parameter(default=None)
destination = luigi.Parameter(config_path={'section': 'enrollment-reports', 'name': 'destination'})
credentials = luigi.Parameter(
config_path={'section': 'database-import', 'name': 'credentials'}
)
blacklist = luigi.Parameter(config_path={'section': 'enrollment-reports', 'name': 'blacklist'})
"""Provides methods useful for generating reports using course enrollment counts."""
def read_course_date_count_tsv(self, input_file):
"""Read TSV file with hard-coded column names into a pandas DataFrame."""
names = ['course_id', 'date', 'count']
# Not assuming any encoding, course_id will be read as plain string
data = read_tsv(input_file, names)
data.date = pandas.to_datetime(data.date)
return data
def initialize_daily_count(self, course_date_count_data):
"""
Reorganize a course-date-count data table to index by date.
Args:
Pandas dataframe with one row per course_id and
columns for the date and count of the offset.
Returns:
Pandas dataframe with one column per course_id, and
indexed rows for the date. Counts are set to zero for
dates that are missing.
"""
data = course_date_count_data.pivot(
index='date',
columns='course_id',
values='count',
)
# Complete the range of data to include all days between
# the dates of the first and last events.
date_range = pandas.date_range(min(data.index), max(data.index))
data = data.reindex(date_range)
data = data.fillna(0)
return data
def add_offsets_to_daily_count(self, count_by_day, offsets):
"""
Add offsets to a dataframe in-place.
Args:
count_by_day: Pandas dataframe with one column per course_id, and
indexed rows for the date.
offsets: Pandas dataframe with one row per course_id and
columns for the date and count of the offset.
"""
for _, (course_id, date, count) in offsets.iterrows():
if course_id in count_by_day.columns:
# The offsets are computed to beginning of that day. We
# add them to the counts by the end of that day to
# get the correct count for the day.
count_by_day.loc[date, course_id] += count
else:
# We have an offset for the course, but no current
# counts. Create an course entry, set the offset, and set
# all subsequent counts to zero.
count_by_day.loc[date, course_id] = count
count_by_day.loc[count_by_day.index > date, course_id] = 0
# Flag values before the offset day with NaN,
# since they are not "available".
not_available = count_by_day.index < date
count_by_day.loc[not_available, course_id] = numpy.NaN
def calculate_total_enrollment(self, count_by_day, offsets=None):
"""
Accumulate enrollment changes per day to find total enrollment per day.
Args:
count_by_day: Pandas dataframe with one column per course_id, and
indexed rows for the date. Counts are net changes in enrollment
during the day for each course.
offsets: Pandas dataframe with one row per course_id and
columns for the date and count of the offset. The offset
for a course is used to provide total enrollment counts
at a point in time right before the timeframe covered by count_by_day.
"""
if offsets is not None:
self.add_offsets_to_daily_count(count_by_day, offsets)
# Calculate the cumulative sum per day of the input.
# Entries with NaN stay NaN.
# At this stage only the data prior to the offset should contain NaN.
cumulative_sum = count_by_day.cumsum()
return cumulative_sum
def select_weekly_values(self, daily_values, start, weeks):
"""
Sample daily values on a weekly basis.
Args:
daily_values: Pandas dataframe with one column per course_id, and
indexed rows for the date.
start: last day to request.
weeks: number of weeks to sample (including the last day)
"""
# List the dates of the last day of each week requested.
days = [start - timedelta(i * 7) for i in reversed(xrange(0, weeks))]
# Sample the cumulative data on the requested days.
# Result is NaN if there is no data available for that date.
results = daily_values.loc[days]
return results
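# Illustrative sketch (not part of the original pipeline): the mixin above
# turns per-day enrollment *changes* into cumulative totals, optionally seeded
# with offsets, and then samples those totals on week boundaries. The toy
# frame below is made up purely to show that flow and is not used by any task.
def _example_enrollment_flow():
    changes = pandas.DataFrame(
        {'example/course/run': [5, 3, -1, 2]},
        index=pandas.date_range('2014-01-06', periods=4),
    )
    # Cumulative sums turn net daily changes into total enrollment per day.
    totals = changes.cumsum()
    # A weekly report then samples these totals on the requested end-of-week days.
    return totals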
class EnrollmentsByWeek(luigi.Task, CourseEnrollmentCountMixin):
"""Calculates cumulative enrollments per week per course.
Parameters:
source: Location of daily enrollments per date. The format is a hadoop
tsv file, with fields course_id, date and count.
destination: Location of the resulting report. The output format is a
excel csv file with course_id and one column per requested week.
offsets: Location of seed values for each course. The format is a
hadoop tsv file, with fields course_id, date and offset.
date: End date of the last week requested.
weeks: Number of weeks from the end date to request.
Output:
Excel CSV file with one row per course. The columns are
the cumulative enrollments counts for each week requested.
"""
def requires(self):
results = {
'source': CourseEnrollmentChangesPerDay(
name=self.name,
src=self.src,
dest=self.destination,
include=self.include,
manifest=self.manifest,
mapreduce_engine=self.mapreduce_engine,
lib_jar=self.lib_jar,
n_reduce_tasks=self.n_reduce_tasks
)
}
if self.offsets:
results.update({'offsets': ExternalURL(self.offsets)})
if self.statuses:
results.update({'statuses': ExternalURL(self.statuses)})
return results
def output(self):
return get_target_from_url(url_path_join(self.destination, "weekly_enrollments_{0}.csv".format(self.name)))
def run(self):
# Load the data into pandas dataframes
daily_enrollment_changes = self.read_source()
offsets = self.read_offsets()
daily_enrollment_totals = self.calculate_total_enrollment(daily_enrollment_changes, offsets)
# Sample the cumulative data on the requested days.
# Result is NaN if there is no data available for that date.
weekly_enrollment_totals = self.select_weekly_values(
daily_enrollment_totals,
self.date,
self.weeks
)
statuses = self.read_statuses()
with self.output().open('w') as output_file:
self.save_output(weekly_enrollment_totals, statuses, output_file)
def read_source(self):
"""
Read source into a pandas DataFrame.
Returns:
Pandas dataframe with one column per course_id. Indexed
for the time interval available in the source data.
"""
with self.input()['source'].open('r') as input_file:
course_date_count_data = self.read_course_date_count_tsv(input_file)
data = self.initialize_daily_count(course_date_count_data)
return data
def read_offsets(self):
"""
Read offsets into a pandas DataFrame.
Returns:
Pandas dataframe with one row per course_id and
columns for the date and count of the offset.
Returns None if no offset was specified.
"""
data = None
if self.input().get('offsets'):
with self.input()['offsets'].open('r') as offset_file:
data = self.read_course_date_count_tsv(offset_file)
return data
def read_statuses(self):
"""
Read course statuses into a pandas DataFrame.
Returns:
Pandas dataframe with one row per course_id and
a column for the status. The status should
be either "past", "current" or "new". The index
for the DataFrame is the course_id.
Returns None if no statuses was specified.
"""
data = None
names = ['course_id', 'status']
if self.input().get('statuses'):
with self.input()['statuses'].open('r') as status_file:
data = read_tsv(status_file, names)
data = data.set_index('course_id')
return data
def save_output(self, results, statuses, output_file):
results = results.transpose()
# List of fieldnames for the report
fieldnames = ['status', 'course_id', 'org_id'] + list(results.columns)
writer = csv.DictWriter(output_file, fieldnames)
writer.writerow(dict((k, k) for k in fieldnames)) # Write header
def format_counts(counts_dict):
for k, v in counts_dict.iteritems():
yield k, '-' if numpy.isnan(v) else int(v)
for course_id, series in results.iterrows():
# Course_id is passed throughout these reports as a
# utf8-encoded str, so it must be locally converted to
# unicode before parsing for org.
org_id = get_org_id_for_course(course_id.decode('utf-8'))
values = {
'course_id': course_id,
'status': self.get_status_for_course(course_id, statuses),
'org_id': org_id or '-',
}
by_week_values = format_counts(series.to_dict())
values.update(by_week_values)
writer.writerow(values)
def get_status_for_course(self, course_id, statuses):
'''
Args:
course_id(str): The identifier for the course. Should be formatted
as <org_id>/<name>/<run>.
statuses(pandas.DataFrame): A pandas DataFrame mapping course_ids
to course statuses. It is expected to be indexed on course_id.
Returns:
The course's status as a string.
'''
if statuses is None or course_id not in statuses.index:
return '-'
return statuses.loc[course_id]['status']
| agpl-3.0 |
jimgoo/zipline-fork | zipline/finance/performance/position_tracker.py | 3 | 17041 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import logbook
import numpy as np
import pandas as pd
from pandas.lib import checknull
from collections import namedtuple
try:
# optional cython based OrderedDict
from cyordereddict import OrderedDict
except ImportError:
from collections import OrderedDict
from six import iteritems, itervalues
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.finance.slippage import Transaction
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
import zipline.protocol as zp
from zipline.assets import (
Equity, Future
)
from zipline.errors import PositionTrackerMissingAssetFinder
from . position import positiondict
log = logbook.Logger('Performance')
PositionStats = namedtuple('PositionStats',
['net_exposure',
'gross_value',
'gross_exposure',
'short_value',
'short_exposure',
'shorts_count',
'long_value',
'long_exposure',
'longs_count',
'net_value'])
def calc_position_values(amounts,
last_sale_prices,
value_multipliers):
iter_amount_price_multiplier = zip(
amounts,
last_sale_prices,
itervalues(value_multipliers),
)
return [
price * amount * multiplier for
price, amount, multiplier in iter_amount_price_multiplier
]
def calc_net(values):
# Returns 0.0 if there are no values.
return sum(values, np.float64())
def calc_position_exposures(amounts,
last_sale_prices,
exposure_multipliers):
iter_amount_price_multiplier = zip(
amounts,
last_sale_prices,
itervalues(exposure_multipliers),
)
return [
price * amount * multiplier for
price, amount, multiplier in iter_amount_price_multiplier
]
def calc_long_value(position_values):
return sum(i for i in position_values if i > 0)
def calc_short_value(position_values):
return sum(i for i in position_values if i < 0)
def calc_long_exposure(position_exposures):
return sum(i for i in position_exposures if i > 0)
def calc_short_exposure(position_exposures):
return sum(i for i in position_exposures if i < 0)
def calc_longs_count(position_exposures):
return sum(1 for i in position_exposures if i > 0)
def calc_shorts_count(position_exposures):
return sum(1 for i in position_exposures if i < 0)
def calc_gross_exposure(long_exposure, short_exposure):
return long_exposure + abs(short_exposure)
def calc_gross_value(long_value, short_value):
return long_value + abs(short_value)
def calc_position_stats(pt):
amounts = []
last_sale_prices = []
for pos in itervalues(pt.positions):
amounts.append(pos.amount)
last_sale_prices.append(pos.last_sale_price)
position_value_multipliers = pt._position_value_multipliers
position_exposure_multipliers = pt._position_exposure_multipliers
position_values = calc_position_values(
amounts,
last_sale_prices,
position_value_multipliers
)
position_exposures = calc_position_exposures(
amounts,
last_sale_prices,
position_exposure_multipliers
)
long_value = calc_long_value(position_values)
short_value = calc_short_value(position_values)
gross_value = calc_gross_value(long_value, short_value)
long_exposure = calc_long_exposure(position_exposures)
short_exposure = calc_short_exposure(position_exposures)
gross_exposure = calc_gross_exposure(long_exposure, short_exposure)
net_exposure = calc_net(position_exposures)
longs_count = calc_longs_count(position_exposures)
shorts_count = calc_shorts_count(position_exposures)
net_value = calc_net(position_values)
return PositionStats(
long_value=long_value,
gross_value=gross_value,
short_value=short_value,
long_exposure=long_exposure,
short_exposure=short_exposure,
gross_exposure=gross_exposure,
net_exposure=net_exposure,
longs_count=longs_count,
shorts_count=shorts_count,
net_value=net_value
)
class PositionTracker(object):
def __init__(self, asset_finder):
self.asset_finder = asset_finder
# sid => position object
self.positions = positiondict()
# Arrays for quick calculations of positions value
self._position_value_multipliers = OrderedDict()
self._position_exposure_multipliers = OrderedDict()
self._position_payout_multipliers = OrderedDict()
self._unpaid_dividends = pd.DataFrame(
columns=zp.DIVIDEND_PAYMENT_FIELDS,
)
self._positions_store = zp.Positions()
# Dict, keyed on dates, that contains lists of close position events
# for any Assets in this tracker's positions
self._auto_close_position_sids = {}
def _update_asset(self, sid):
try:
self._position_value_multipliers[sid]
self._position_exposure_multipliers[sid]
self._position_payout_multipliers[sid]
except KeyError:
# Check if there is an AssetFinder
if self.asset_finder is None:
raise PositionTrackerMissingAssetFinder()
# Collect the value multipliers from applicable sids
asset = self.asset_finder.retrieve_asset(sid)
if isinstance(asset, Equity):
self._position_value_multipliers[sid] = 1
self._position_exposure_multipliers[sid] = 1
self._position_payout_multipliers[sid] = 0
if isinstance(asset, Future):
self._position_value_multipliers[sid] = 0
self._position_exposure_multipliers[sid] = \
asset.contract_multiplier
self._position_payout_multipliers[sid] = \
asset.contract_multiplier
# Futures auto-close timing is controlled by the Future's
# auto_close_date property
self._insert_auto_close_position_date(
dt=asset.auto_close_date,
sid=sid
)
def _insert_auto_close_position_date(self, dt, sid):
"""
        Inserts the given SID into the list of positions to be auto-closed by
the given dt.
Parameters
----------
dt : pandas.Timestamp
The date before-which the given SID will be auto-closed
sid : int
The SID of the Asset to be auto-closed
"""
if dt is not None:
self._auto_close_position_sids.setdefault(dt, set()).add(sid)
def auto_close_position_events(self, next_trading_day):
"""
Generates CLOSE_POSITION events for any SIDs whose auto-close date is
before or equal to the given date.
Parameters
----------
next_trading_day : pandas.Timestamp
The time before-which certain Assets need to be closed
Yields
------
Event
A close position event for any sids that should be closed before
the next_trading_day parameter
"""
past_asset_end_dates = set()
# Check the auto_close_position_dates dict for SIDs to close
for date, sids in self._auto_close_position_sids.items():
if date > next_trading_day:
continue
past_asset_end_dates.add(date)
for sid in sids:
# Yield a CLOSE_POSITION event
event = Event({
'dt': date,
'type': DATASOURCE_TYPE.CLOSE_POSITION,
'sid': sid,
})
yield event
# Clear out past dates
while past_asset_end_dates:
self._auto_close_position_sids.pop(past_asset_end_dates.pop())
def update_last_sale(self, event):
# NOTE, PerformanceTracker already vetted as TRADE type
sid = event.sid
if sid not in self.positions:
return 0
price = event.price
if checknull(price):
return 0
pos = self.positions[sid]
old_price = pos.last_sale_price
pos.last_sale_date = event.dt
pos.last_sale_price = price
# Calculate cash adjustment on assets with multipliers
return ((price - old_price) * self._position_payout_multipliers[sid]
* pos.amount)
def update_positions(self, positions):
# update positions in batch
self.positions.update(positions)
for sid, pos in iteritems(positions):
self._update_asset(sid)
def update_position(self, sid, amount=None, last_sale_price=None,
last_sale_date=None, cost_basis=None):
pos = self.positions[sid]
if amount is not None:
pos.amount = amount
self._update_asset(sid=sid)
if last_sale_price is not None:
pos.last_sale_price = last_sale_price
if last_sale_date is not None:
pos.last_sale_date = last_sale_date
if cost_basis is not None:
pos.cost_basis = cost_basis
def execute_transaction(self, txn):
# Update Position
# ----------------
sid = txn.sid
position = self.positions[sid]
position.update(txn)
self._update_asset(sid)
def handle_commission(self, commission):
# Adjust the cost basis of the stock if we own it
if commission.sid in self.positions:
self.positions[commission.sid].\
adjust_commission_cost_basis(commission)
def handle_split(self, split):
if split.sid in self.positions:
# Make the position object handle the split. It returns the
# leftover cash from a fractional share, if there is any.
position = self.positions[split.sid]
leftover_cash = position.handle_split(split)
self._update_asset(split.sid)
return leftover_cash
def _maybe_earn_dividend(self, dividend):
"""
Take a historical dividend record and return a Series with fields in
zipline.protocol.DIVIDEND_FIELDS (plus an 'id' field) representing
the cash/stock amount we are owed when the dividend is paid.
"""
if dividend['sid'] in self.positions:
return self.positions[dividend['sid']].earn_dividend(dividend)
else:
return zp.dividend_payment()
def earn_dividends(self, dividend_frame):
"""
Given a frame of dividends whose ex_dates are all the next trading day,
calculate and store the cash and/or stock payments to be paid on each
dividend's pay date.
"""
earned = dividend_frame.apply(self._maybe_earn_dividend, axis=1)\
.dropna(how='all')
if len(earned) > 0:
# Store the earned dividends so that they can be paid on the
# dividends' pay_dates.
self._unpaid_dividends = pd.concat(
[self._unpaid_dividends, earned],
)
def _maybe_pay_dividend(self, dividend):
"""
Take a historical dividend record, look up any stored record of
cash/stock we are owed for that dividend, and return a Series
with fields drawn from zipline.protocol.DIVIDEND_PAYMENT_FIELDS.
"""
try:
unpaid_dividend = self._unpaid_dividends.loc[dividend['id']]
return unpaid_dividend
except KeyError:
return zp.dividend_payment()
def pay_dividends(self, dividend_frame):
"""
Given a frame of dividends whose pay_dates are all the next trading
day, grant the cash and/or stock payments that were calculated on the
given dividends' ex dates.
"""
payments = dividend_frame.apply(self._maybe_pay_dividend, axis=1)\
.dropna(how='all')
# Mark these dividends as paid by dropping them from our unpaid
# table.
        self._unpaid_dividends = self._unpaid_dividends.drop(payments.index)
# Add stock for any stock dividends paid. Again, the values here may
# be negative in the case of short positions.
stock_payments = payments[payments['payment_sid'].notnull()]
for _, row in stock_payments.iterrows():
stock = row['payment_sid']
share_count = row['share_count']
# note we create a Position for stock dividend if we don't
# already own the asset
position = self.positions[stock]
position.amount += share_count
self._update_asset(stock)
# Add cash equal to the net cash payed from all dividends. Note that
# "negative cash" is effectively paid if we're short an asset,
# representing the fact that we're required to reimburse the owner of
# the stock for any dividends paid while borrowing.
net_cash_payment = payments['cash_amount'].fillna(0).sum()
return net_cash_payment
def maybe_create_close_position_transaction(self, event):
try:
pos = self.positions[event.sid]
amount = pos.amount
if amount == 0:
return None
except KeyError:
return None
if 'price' in event:
price = event.price
else:
price = pos.last_sale_price
txn = Transaction(
sid=event.sid,
amount=(-1 * pos.amount),
dt=event.dt,
price=price,
commission=0,
order_id=0
)
return txn
def get_positions(self):
positions = self._positions_store
for sid, pos in iteritems(self.positions):
if pos.amount == 0:
# Clear out the position if it has become empty since the last
# time get_positions was called. Catching the KeyError is
# faster than checking `if sid in positions`, and this can be
# potentially called in a tight inner loop.
try:
del positions[sid]
except KeyError:
pass
continue
# Note that this will create a position if we don't currently have
# an entry
position = positions[sid]
position.amount = pos.amount
position.cost_basis = pos.cost_basis
position.last_sale_price = pos.last_sale_price
return positions
def get_positions_list(self):
positions = []
for sid, pos in iteritems(self.positions):
if pos.amount != 0:
positions.append(pos.to_dict())
return positions
def __getstate__(self):
state_dict = {}
state_dict['asset_finder'] = self.asset_finder
state_dict['positions'] = dict(self.positions)
state_dict['unpaid_dividends'] = self._unpaid_dividends
state_dict['auto_close_position_sids'] = self._auto_close_position_sids
STATE_VERSION = 3
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 3
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("PositionTracker saved state is too old.")
self.asset_finder = state['asset_finder']
self.positions = positiondict()
# note that positions_store is temporary and gets regened from
# .positions
self._positions_store = zp.Positions()
self._unpaid_dividends = state['unpaid_dividends']
self._auto_close_position_sids = state['auto_close_position_sids']
# Arrays for quick calculations of positions value
self._position_value_multipliers = OrderedDict()
self._position_exposure_multipliers = OrderedDict()
self._position_payout_multipliers = OrderedDict()
# Update positions is called without a finder
self.update_positions(state['positions'])
| apache-2.0 |
refgenomics/onecodex | tests/test_metadata_upload.py | 2 | 8329 | import mock
import pytest
from onecodex.exceptions import ValidationError
from onecodex.metadata_upload import (
validate_appendables,
validate_tags,
validate_metadata,
validate_metadata_against_schema,
validate_number,
validate_enum,
validate_boolean,
validate_datetime,
is_blacklisted,
coerce_custom_value,
)
def schema_rules(ocx, value):
if value:
return ocx.Metadata._resource._schema["properties"][value]
else:
return ocx.Metadata._resource._schema["properties"]
def test_validate_appendables(ocx):
initial_appendables = {"metadata": {"foo": "bar", "starred": "true"}, "tags": ["baz"]}
final_appendables = dict(initial_appendables)
final_appendables["valid_metadata"] = {"starred": True}
final_appendables["valid_metadata"]["custom"] = {"foo": "bar"}
final_appendables["valid_tags"] = [{"name": "baz"}]
appendables = validate_appendables(initial_appendables, ocx)
assert appendables == final_appendables
def test_validate_tags_valid(ocx):
initial_appendables = {
"metadata": {},
"tags": ["baz"],
"valid_tags": [],
"valid_metadata": {"custom": {}},
}
validate_tags(initial_appendables, ocx)
assert initial_appendables["valid_tags"] == [{"name": "baz"}]
def test_validate_tags_invalid(ocx):
too_many_characters = ocx.Tags._resource._schema["properties"]["name"]["maxLength"] + 1
invalid_tag = "a" * too_many_characters
initial_appendables = {
"metadata": {},
"tags": [invalid_tag],
"valid_tags": [],
"valid_metadata": {"custom": {}},
}
with pytest.raises(ValidationError):
validate_tags(initial_appendables, ocx)
def test_validate_metadata_valid(ocx):
initial_appendables = {
"metadata": {"starred": "true"},
"tags": [],
"valid_tags": [],
"valid_metadata": {"custom": {}},
}
final_appendables = dict(initial_appendables)
final_appendables["valid_metadata"]["starred"] = True
validate_metadata(initial_appendables, ocx)
assert initial_appendables == final_appendables
def test_validate_metadata_not_present(ocx):
initial_appendables = {"tags": [], "valid_tags": [], "valid_metadata": {"custom": {}}}
final_appendables = dict(initial_appendables)
validate_metadata(initial_appendables, ocx)
assert initial_appendables == final_appendables
def test_validate_metadata_blacklisted(ocx):
initial_appendables = {
"metadata": {"$uri": "invalid_entry"},
"tags": [],
"valid_tags": [],
"valid_metadata": {"custom": {}},
}
with pytest.raises(ValidationError):
validate_metadata(initial_appendables, ocx)
def test_validate_metadata_custom_valid(ocx):
initial_appendables = {
"metadata": {"foo": "bar"},
"tags": [],
"valid_tags": [],
"valid_metadata": {"custom": {}},
}
final_appendables = dict(initial_appendables)
final_appendables["valid_metadata"]["custom"]["foo"] = "bar"
validate_metadata(initial_appendables, ocx)
assert initial_appendables == final_appendables
def test_validate_metadata_against_schema(ocx):
schema_props = schema_rules(ocx, None)
with mock.patch("onecodex.metadata_upload.validate_enum") as mock_enum:
validate_metadata_against_schema(schema_props, "platform", "Illumina")
assert mock_enum.call_count == 1
with mock.patch("onecodex.metadata_upload.validate_number") as mock_number:
validate_metadata_against_schema(schema_props, "location_lat", "90")
assert mock_number.call_count == 1
with mock.patch("onecodex.metadata_upload.validate_boolean") as mock_bool:
validate_metadata_against_schema(schema_props, "starred", "true")
assert mock_bool.call_count == 1
with mock.patch("onecodex.metadata_upload.validate_datetime") as mock_datetime:
validate_metadata_against_schema(schema_props, "date_collected", "2018,05,08,12,12")
assert mock_datetime.call_count == 1
string_response = validate_metadata_against_schema(schema_props, "name", "foo")
assert string_response == "foo"
def test_validate_number_valid(ocx):
schema_props = schema_rules(ocx, "location_lat")
valid_number = validate_number("50", schema_props)
assert valid_number == 50
def test_validate_number_invalid_large(ocx):
schema_props = schema_rules(ocx, "location_lat")
with pytest.raises(ValidationError) as exception_info:
validate_number("200", schema_props)
assert "200 must be smaller than the maximum value: 90.0" in str(exception_info)
def test_validate_number_invalid_small(ocx):
schema_props = schema_rules(ocx, "location_lat")
with pytest.raises(ValidationError) as exception_info:
validate_number("-200", schema_props)
assert "-200 must be larger than the minimum value: -90.0" in str(exception_info)
def test_validate_enum_valid(ocx):
schema_props = schema_rules(ocx, "platform")
validate_enum("Illumina HiSeq", schema_props)
def test_validate_enum_invalid(ocx):
schema_props = schema_rules(ocx, "platform")
with pytest.raises(ValidationError) as exception_info:
validate_enum("Foo", schema_props)
assert "Foo is not a valid value for this key." in str(exception_info)
def test_validate_boolean_truthy():
validate_boolean("true")
validate_boolean("TRUE")
validate_boolean("T")
validate_boolean("1")
validate_boolean("Y")
validate_boolean("YES")
def test_validate_boolean_falsy():
validate_boolean("false")
validate_boolean("FALSE")
validate_boolean("F")
validate_boolean("0")
validate_boolean("N")
validate_boolean("No")
def test_validate_boolean_invalid():
with pytest.raises(ValidationError) as exception_info:
validate_boolean("FOO")
assert 'FOO must be either "true" or "false"' in str(exception_info)
def test_validate_datetime_valid():
validate_datetime("2018-05-15T16:21:36+00:00")
validate_datetime("2018-05-15T16:21:36")
def test_validate_datetime_invalid():
with pytest.raises(ValidationError) as exception_info:
validate_datetime("2018, 05, 15, 16, 21, 36")
assert (
'"2018, 05, 15, 16, 21, 36" must be formatted in iso8601 compliant date format. Example: "2018-05-15T16:21:36+00:00"'
in str(exception_info)
)
def test_is_blacklisted_valid():
blacklisted_resp = is_blacklisted("platform")
assert blacklisted_resp is False
def test_is_blacklisted_invalid():
blacklisted_resp = is_blacklisted("$uri")
assert blacklisted_resp is True
def test_coerce_custom_value_float():
custom_value = coerce_custom_value("42")
assert custom_value == 42.0
assert type(custom_value) == float
def test_coerce_custom_value_truthy():
custom_value = coerce_custom_value("True")
assert custom_value is True
def test_coerce_custom_value_falsy():
custom_value = coerce_custom_value("False")
assert custom_value is False
def test_coerce_custom_value_string():
custom_value = coerce_custom_value("Foo")
assert custom_value == "Foo"
def test_pandas_and_numpy_type_coercions(ocx, upload_mocks):
# See https://github.com/onecodex/onecodex/issues/232 and
# https://github.com/pandas-dev/pandas/issues/25969
# We can remove this test once the Pandas bug above is fixed
# upstream and we require >= that version of pandas.
pytest.importorskip("pandas")
import numpy as np
import pandas as pd
series = pd.Series({"a": np.int64(64), "b": 10, "c": "ABC"})
metadata = {
"platform": "Illumina NovaSeq 6000",
"date_collected": "2019-04-14T00:51:54.832048+00:00",
"external_sample_id": "my-lims-ID-or-similar",
"custom": series.to_dict(),
}
# This works
ocx.Samples._resource.init_multipart_upload(
filename="SRR2352185.fastq.gz", size=181687821, metadata=metadata
)
# Now try to serialize something really not supported, so we can get the Exception
with pytest.raises(TypeError):
metadata["custom"]["bad_field"] = ocx.Samples # not JSON serializable
ocx.Samples._resource.init_multipart_upload(
filename="SRR2352185.fastq.gz", size=181687821, metadata=metadata
)
| mit |
Garrett-R/scikit-learn | sklearn/cluster/bicluster.py | 38 | 19313 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
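# The helper below is an illustrative usage sketch added for clarity; it is
# not part of the original module. It assumes make_biclusters and
# consensus_score are available in this version of scikit-learn.
def _spectral_coclustering_example():
    """Fit SpectralCoclustering on synthetic block-diagonal data."""
    from sklearn.datasets import make_biclusters
    from sklearn.metrics import consensus_score
    # Generate data with five planted biclusters plus noise.
    data, rows, columns = make_biclusters(
        shape=(300, 300), n_clusters=5, noise=5, random_state=0)
    model = SpectralCoclustering(n_clusters=5, random_state=0)
    model.fit(data)
    # consensus_score is 1.0 when the planted biclusters are recovered exactly.
    return consensus_score(model.biclusters_, (rows, columns))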
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause |
makersauce/stocks | strategy2.py | 1 | 8304 | ##Strategy File
###INTERSECTING WATERFALL STRATEGY
###When 5,10,30 day moving averages intersect and make a triangle
## with 30 day on top, 10 day in middle and 5 day on the bottom,
## Buy when the percent increase from MARKET to 30_DAY reaches threshold UNLESS the 5 day average exceeds the 10 day or 30 day.
## Sell when the 5 Day decreases
import datetime
from stock import Stock, piggy
from sys import argv
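# Illustrative helper (not part of the original script): the strategy comments
# above rely on 5, 10 and 30 day moving averages, which stock.analyze() is
# assumed to have stored in history_data as '5_Day', '10_Day' and '30_Day'.
# A trailing moving average of closing prices could be computed like this:
def simple_moving_average(closes, window):
    """Return trailing moving averages for a list of closing prices."""
    averages = []
    for i in range(len(closes)):
        window_slice = [float(c) for c in closes[max(0, i - window + 1):i + 1]]
        averages.append(sum(window_slice) / len(window_slice))
    return averages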
if __name__ == "__main__":
if len(argv) > 1:
if argv[1] == '--simulate':
if len(argv) < 3:
print 'Please specify symbol'
exit()
buy_flag = False; earn_flag = False; sell_flag = False;
symbol = argv[2]
stock = Stock(symbol)
stock.update_history()
stock.analyze()
buy_dates = []
buy_prices = []
sell_dates = []
sell_prices = []
wiggly = piggy(sim=True,holdings=300)
buy_flag = False
for itx, date in enumerate(stock.history_data['Date']):
ptrn_lookahead = 2
prox_thres = .04 * float(stock.history_data['Close'][itx])
gains_thres = 1.1
if float(stock.history_data['30_Day'][itx-ptrn_lookahead]) - prox_thres <= float(stock.history_data['10_Day'][itx-ptrn_lookahead]) <= float(stock.history_data['30_Day'][itx-ptrn_lookahead]) + prox_thres\
and float(stock.history_data['30_Day'][itx-ptrn_lookahead]) - prox_thres <= float(stock.history_data['5_Day'][itx-ptrn_lookahead]) <= float(stock.history_data['30_Day'][itx-ptrn_lookahead]) + prox_thres\
and float(stock.history_data['30_Day'][itx]) > float(stock.history_data['10_Day'][itx]) > float(stock.history_data['5_Day'][itx]):
buy_flag = True
slope_thres = .01 * float(stock.history_data['5_Day'][itx-3])
if float(stock.history_data['30_Day'][itx]) - float(stock.history_data['30_Day'][itx-1]) < float(stock.history_data['5_Day'][itx]) - float(stock.history_data['5_Day'][itx-1]) - slope_thres \
and not sell_flag:
buy_dates.append(date)
buy_prices.append(float(stock.history_data['Close'][itx]))
buy_flag = False
sell_flag = True
num = int(wiggly.holdings * .5 / float(stock.history_data['Close'][itx]))
wiggly.buy(stock,num,date=date)
buy_dates.append(date)
itx_slope_trig = itx
buy_prices.append(float(stock.history_data['Close'][itx]))
if buy_flag \
                    and float(stock.history_data['10_Day'][itx]) * gains_thres < float(stock.history_data['30_Day'][itx]): ##Once there is enough margin between the 10 day and 30 day, buy
buy_dates.append(date)
buy_prices.append(float(stock.history_data['Close'][itx]))
buy_flag = False
earn_flag = True
num = int(wiggly.holdings * .5 / float(stock.history_data['Close'][itx]))
wiggly.buy(stock,num,date=date)
buy_dates.append(date)
buy_prices.append(float(stock.history_data['Close'][itx]))
earn_thres = .20
if (earn_flag
and float(stock.history_data['30_Day'][itx]) * (1-earn_thres) < float(stock.history_data['5_Day'][itx]) ##If the 5 day gets close enough to the 30 day,
and float(stock.history_data['10_Day'][itx]) > float(stock.history_data['10_Day'][itx-1])): ## and the 10 Day is increasing, then throw the EARNING flag
earn_flag = False
sell_flag = True
sell_thres = .1
if (sell_flag
and float(stock.history_data['5_Day'][itx]) < float(stock.history_data['5_Day'][itx-1])
and float(stock.history_data['Close'][itx_slope_trig]) * (1+sell_thres) < float(stock.history_data['Close'][itx])): ##Once the 5 day decreases, pull the sell trigger
sell_flag = False
wiggly.sell(stock,-1,date=date)
sell_dates.append(date)
sell_prices.append(float(stock.history_data['Close'][itx]))
'''
## Sell if the 10_day drops
trigger_drop = .02
trigger_starved = 1.5
if float(stock.history_data['10_Day'][itx]) <= float(stock.history_data['10_Day'][itx-5]) - trigger_drop*float(stock.history_data['10_Day'][itx]) \
and bool(wiggly.current_stock.get(stock.symbol)) and wiggly.current_stock[stock.symbol] >= 1:
wiggly.sell(stock,-1,date=date)
sell_dates.append(date)
sell_prices.append(float(stock.history_data['Close'][itx]))
## Buy if the 10_day busts through the 30_day
if (float(stock.history_data['10_Day'][itx]) > float(stock.history_data['30_Day'][itx]) ## If the 10 Day was below the 30 day
and float(stock.history_data['10_Day'][itx-1]) < float(stock.history_data['30_Day'][itx-1]) ## and now the 10 day is above the 30
and float(stock.history_data['Open'][itx-1]) > float(stock.history_data['10_Day'][itx-1]) ## and the current value is above the 10 day
and float(stock.history_data['Close'][itx]) < (float(stock.history_data['30_Day'][itx]) * trigger_starved)
and wiggly.holdings > float(stock.history_data['Close'][itx]) + wiggly.broker.tradeFee): ## and I have enough money
num = int(wiggly.holdings * .5 / float(stock.history_data['Close'][itx]))
wiggly.buy(stock,num,date=date)
buy_dates.append(date)
buy_prices.append(float(stock.history_data['Close'][itx]))
'''
print "\n\n#####Closing Out######"
if wiggly.current_stock.keys():
wiggly.sell(stock,-1,date=date)
##Make a plot
import matplotlib.pyplot as plt
import matplotlib.dates as plotdate
import matplotlib.lines as line
import numpy as np
            months = plotdate.MonthLocator()  # tick every month
            days = plotdate.DayLocator()  # minor tick every day
monthsFmt = plotdate.DateFormatter('%m %d')
fig, ax = plt.subplots()
#ax2 = ax.twinx()
t = [datetime.datetime.strptime(date,'%Y-%m-%d') for date in stock.history_data['Date']]
ax.axis('auto')
# format the ticks
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(monthsFmt)
ax.xaxis.set_minor_locator(days)
fig.autofmt_xdate()
ax.plot(t, stock.history_data['5_Day'], '#0000FF')
ax.plot(t, stock.history_data['10_Day'], '#5555FF')
ax.plot(t, stock.history_data['30_Day'], '#9999FF')
#ax.plot(t, stock.history_data['80_Day'], '#AAAAFF')
#ax2.plot(t, stock.history_data['Volume'], '#CCFFCC')
#ax2.plot(t, stock.history_data['10_Day_Vol'], '#88AA88')
buy_dates = [datetime.datetime.strptime(date,'%Y-%m-%d') for date in buy_dates]
ax.plot(buy_dates,buy_prices, 'g|',ms=100)
sell_dates = [datetime.datetime.strptime(date,'%Y-%m-%d') for date in sell_dates]
ax.plot(sell_dates,sell_prices, 'b|',ms=100)
ax.plot(t, stock.history_data['Close'], 'r-')
plt.title(stock.symbol)
#ax.text(t[12], 250, 'hello')
plt.show()
elif argv[1] == '--deploy':
print 'Sorry, Deploy function not ready yet. Try some more simulations'
elif argv[1] == '--help' or argv[1] == '-h':
print 'Sorry, can\'t help you yet'
else:
            print 'Sorry, ' + argv[1] + ' is not a valid argument. Try \'-h\' for help'
else:
print 'Invalid Number of Arguments'
print 'try \"--help\" for information on this module'
| mit |
BiaDarkia/scikit-learn | examples/feature_selection/plot_feature_selection_pipeline.py | 58 | 1049 | """
==================
Pipeline Anova SVM
==================
Simple usage of Pipeline that successively runs univariate
feature selection with ANOVA and then a C-SVM on the selected features.
"""
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
print(__doc__)
# import some data to play with
X, y = samples_generator.make_classification(
n_features=20, n_informative=3, n_redundant=0, n_classes=4,
n_clusters_per_class=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# ANOVA SVM-C
# 1) anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# 2) svm
clf = svm.SVC(kernel='linear')
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X_train, y_train)
y_pred = anova_svm.predict(X_test)
print(classification_report(y_test, y_pred))
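# Optional follow-up (not part of the original example): inspect which of
# the 20 input features the ANOVA step kept.  ``get_support`` is part of
# SelectKBest's API; the step name 'selectkbest' is the one make_pipeline
# derives from the class name, and the variable name ``selected`` is ours.
selected = anova_svm.named_steps['selectkbest'].get_support(indices=True)
print("Features selected by ANOVA: %s" % (selected,))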
| bsd-3-clause |
Windy-Ground/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
lengths, stored in a 150x4 numpy.ndarray.
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
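# Optional addition (not part of the original example): report how much
# variance the three principal components explain.  The PCA instance above
# was not kept, so we refit one here; ``explained_variance_ratio_`` is a
# standard PCA attribute and the variable name ``pca`` is ours.
pca = PCA(n_components=3).fit(iris.data)
print("Explained variance ratios: %s" % (pca.explained_variance_ratio_,))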
| bsd-3-clause |
jaidevd/jarvis | jarvis/aerospike_connector.py | 1 | 1196 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the MIT license.
"""Tools for pushing pandas dataframes into aerospike servers."""
import aerospike
class DataFrameExporter(object):
def __init__(self, namespace=None, dataframe=None, set_name=None,
hostname=None, port=3000):
self.namespace = namespace
self.dataframe = dataframe
self.set_name = set_name
self.hostname = hostname
self.port = port
def run(self):
        # The aerospike client expects 'hosts' as a list of (host, port) tuples.
        client = aerospike.client({'hosts': [(self.hostname, self.port)]})
        self.client = client.connect()
        # Only the first 10 rows are pushed (as in the original sketch),
        # keyed by their positional index.
        for i in range(10):
            self.client.put((self.namespace, self.set_name, i),
                            self.dataframe.ix[self.dataframe.index[i]].to_dict())
            print i
        self.client.close()
if __name__ == '__main__':
from pysemantic import Project
vf = Project("vfirst")
mtlogs = vf.load_dataset("mtlogs")
    exporter = DataFrameExporter(namespace="vfirst", dataframe=mtlogs,
                                 set_name="mtlogs", hostname="turn.key")
exporter.run()
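    # Optional verification sketch (assumes the standard aerospike read API
    # and that the server listens on the default port 3000): fetch one
    # record back using the same (namespace, set, key) tuple written above.
    client = aerospike.client({'hosts': [("turn.key", 3000)]}).connect()
    (key, meta, bins) = client.get(("vfirst", "mtlogs", 0))
    print bins
    client.close()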
| bsd-3-clause |
rgommers/numpy | numpy/core/code_generators/ufunc_docstrings.py | 7 | 106598 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
import textwrap
docdict = {}
def get(name):
return docdict.get(name)
# common parameter text to all ufuncs
subst = {
'PARAMS': textwrap.dedent("""
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
""").strip(),
'BROADCASTABLE_2': ("If ``x1.shape != x2.shape``, they must be "
"broadcastable to a common\n shape (which becomes "
"the shape of the output)."),
'OUT_SCALAR_1': "This is a scalar if `x` is a scalar.",
'OUT_SCALAR_2': "This is a scalar if both `x1` and `x2` are scalars.",
}
def add_newdoc(place, name, doc):
doc = textwrap.dedent(doc).strip()
skip = (
# gufuncs do not use the OUT_SCALAR replacement strings
'matmul',
# clip has 3 inputs, which is not handled by this
'clip',
)
if name[0] != '_' and name not in skip:
if '\nx :' in doc:
assert '$OUT_SCALAR_1' in doc, "in {}".format(name)
elif '\nx2 :' in doc or '\nx1, x2 :' in doc:
assert '$OUT_SCALAR_2' in doc, "in {}".format(name)
else:
assert False, "Could not detect number of inputs in {}".format(name)
for k, v in subst.items():
doc = doc.replace('$' + k, v)
docdict['.'.join((place, name))] = doc
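# Illustration (comment only): a ufunc docstring registered as
#
#     add_newdoc('numpy.core.umath', 'some_unary_ufunc', """
#         ...
#         x : array_like
#             Input array.
#         $PARAMS
#         ...
#         $OUT_SCALAR_1
#         """)
#
# ends up in ``docdict['numpy.core.umath.some_unary_ufunc']`` with $PARAMS
# replaced by the shared out/where/**kwargs block defined in ``subst`` and
# $OUT_SCALAR_1 replaced by "This is a scalar if `x` is a scalar.".
# ('some_unary_ufunc' is a made-up name used only for this illustration.)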
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
``np.abs`` is a shorthand for this function.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
$OUT_SCALAR_1
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray')
>>> plt.show()
The `abs` function can be used as a shorthand for ``np.absolute`` on
ndarrays.
>>> x = np.array([-1.2, 1.2])
>>> abs(x)
array([1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added.
$BROADCASTABLE_2
$PARAMS
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
The ``+`` operator can be used as a shorthand for ``np.add`` on ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 + x2
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
$PARAMS
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi].
$OUT_SCALAR_1
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that ``cos(z) = x``. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts ``[-inf, -1]`` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in ``[-pi, pi]`` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
$PARAMS
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
$OUT_SCALAR_1
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
    For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
$PARAMS
Returns
-------
out : ndarray or scalar
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
$OUT_SCALAR_1
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [``1j, infj``] and [``-1j, -infj``] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates.
$BROADCASTABLE_2
$PARAMS
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
$OUT_SCALAR_2
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that ``tanh(z) = x``. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
    therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True])
The ``&`` operator can be used as a shorthand for ``np.bitwise_and`` on
ndarrays.
>>> x1 = np.array([2, 5, 255])
>>> x2 = np.array([3, 14, 16])
>>> x1 & x2
array([ 2, 4, 16])
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
    The number 13 has the binary representation ``00001101``. Likewise,
    16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
    then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
... np.array([4, 4, 4, 2147483647], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True])
The ``|`` operator can be used as a shorthand for ``np.bitwise_or`` on
ndarrays.
>>> x1 = np.array([2, 5, 255])
>>> x2 = np.array([4, 4, 4])
>>> x1 | x2
array([ 6, 5, 255])
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False])
The ``^`` operator can be used as a shorthand for ``np.bitwise_xor`` on
ndarrays.
>>> x1 = np.array([True, True])
>>> x2 = np.array([False, True])
>>> x1 ^ x2
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
``i >= x``. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
$OUT_SCALAR_1
See Also
--------
floor, trunc, rint, fix
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, floor, rint, fix
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
The complex conjugate of `x`, with same dtype as `y`.
$OUT_SCALAR_1
Notes
-----
`conj` is an alias for `conjugate`:
>>> np.conj is np.conjugate
True
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding cosine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array of same shape as `x`.
$OUT_SCALAR_1
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
$OUT_SCALAR_1
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in degrees.
$OUT_SCALAR_1
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'heaviside',
"""
Compute the Heaviside step function.
The Heaviside step function is defined as::
0 if x1 < 0
heaviside(x1, x2) = x2 if x1 == 0
1 if x1 > 0
where `x2` is often taken to be 0.5, but 0 and 1 are also sometimes used.
Parameters
----------
x1 : array_like
Input values.
x2 : array_like
The value of the function when x1 is 0.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The output array, element-wise Heaviside step function of `x1`.
$OUT_SCALAR_2
Notes
-----
.. versionadded:: 1.13.0
References
----------
.. Wikipedia, "Heaviside step function",
https://en.wikipedia.org/wiki/Heaviside_step_function
Examples
--------
>>> np.heaviside([-1.5, 0, 2.0], 0.5)
array([ 0. , 0.5, 1. ])
>>> np.heaviside([-1.5, 0, 2.0], 1)
array([ 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise.
$OUT_SCALAR_2
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
The ``/`` operator can be used as a shorthand for ``np.divide`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = 2 * np.ones(3)
>>> x1 / x2
array([[0. , 0.5, 1. ],
[1.5, 2. , 2.5],
[3. , 3.5, 4. ]])
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False])
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True])
The ``==`` operator can be used as a shorthand for ``np.equal`` on
ndarrays.
>>> a = np.array([2, 4, 6])
>>> b = np.array([2, 4, 2])
>>> a == b
array([ True, True, False])
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
$OUT_SCALAR_1
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
    then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
https://en.wikipedia.org/wiki/Exponential_function
    .. [2] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='gray')
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='hsv')
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise 2 to the power `x`.
$OUT_SCALAR_1
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise exponential minus one: ``out = exp(x) - 1``.
$OUT_SCALAR_1
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
$PARAMS
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
$OUT_SCALAR_1
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, trunc, rint, fix
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", where
``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`. The "floor-towards-zero"
function is called ``fix`` in NumPy.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
Python ``%`` (`remainder`), function so that ``a = a % b + b * (a // b)``
up to roundoff.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
$OUT_SCALAR_2
See Also
--------
remainder : Remainder complementary to floor_divide.
divmod : Simultaneous floor division and remainder.
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
The ``//`` operator can be used as a shorthand for ``np.floor_divide``
on ndarrays.
>>> x1 = np.array([1., 2., 3., 4.])
>>> x1 // 2.5
array([0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
$OUT_SCALAR_2
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False])
The ``>`` operator can be used as a shorthand for ``np.greater`` on
ndarrays.
>>> a = np.array([4, 2])
>>> b = np.array([2, 2])
>>> a > b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : bool or ndarray of bool
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False])
The ``>=`` operator can be used as a shorthand for ``np.greater_equal``
on ndarrays.
>>> a = np.array([4, 2, 1])
>>> b = np.array([2, 2, 2])
>>> a >= b
array([ True, True, False])
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
$BROADCASTABLE_2
$PARAMS
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
$OUT_SCALAR_2
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
    representing signed integers on computers [1]_. An N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_1
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
The result depends on the bit-width:
>>> x = np.invert(np.array(13, dtype=np.uint16))
>>> x
65522
>>> np.binary_repr(x, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True])
The ``~`` operator can be used as a shorthand for ``np.invert`` on
ndarrays.
>>> x1 = np.array([True, False])
>>> ~x1
array([False, True])
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray, bool
True where ``x`` is not positive infinity, negative infinity,
or NaN; false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity,
    and that positive infinity is not equivalent to negative infinity; bare
    infinity, however, is equivalent to positive infinity. Errors result if
    the second argument is also supplied when `x` is a scalar input, or if
    the first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
$PARAMS
Returns
-------
y : bool (scalar) or boolean ndarray
True where ``x`` is positive or negative infinity, false otherwise.
$OUT_SCALAR_1
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaN, false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isfinite, isnat
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False])
""")
add_newdoc('numpy.core.umath', 'isnat',
"""
Test element-wise for NaT (not a time) and return result as a boolean array.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like
Input array with datetime or timedelta data type.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaT, false otherwise.
$OUT_SCALAR_1
See Also
--------
isnan, isinf, isneginf, isposinf, isfinite
Examples
--------
>>> np.isnat(np.datetime64("NaT"))
True
>>> np.isnat(np.datetime64("2016-01-01"))
False
>>> np.isnat(np.array(["NaT", "2016-01-01"], dtype="datetime64[ns]"))
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
$OUT_SCALAR_2
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
Note that the dtype of the second argument may change the dtype of the
result and can lead to unexpected results in some cases (see
:ref:`Casting Rules <ufuncs.casting>`):
>>> a = np.left_shift(np.uint8(255), 1) # Expect 254
>>> print(a, type(a)) # Unexpected result due to upcasting
510 <class 'numpy.int64'>
>>> b = np.left_shift(np.uint8(255), np.uint8(1))
>>> print(b, type(b))
254 <class 'numpy.uint8'>
The ``<<`` operator can be used as a shorthand for ``np.left_shift`` on
ndarrays.
>>> x1 = 5
>>> x2 = np.array([1, 2, 3])
>>> x1 << x2
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False])
The ``<`` operator can be used as a shorthand for ``np.less`` on ndarrays.
>>> a = np.array([1, 2])
>>> b = np.array([2, 2])
>>> a < b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True])
The ``<=`` operator can be used as a shorthand for ``np.less_equal`` on
ndarrays.
>>> a = np.array([4, 2, 1])
>>> b = np.array([2, 2, 2])
>>> a <= b
array([False, True, True])
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
$OUT_SCALAR_1
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
$OUT_SCALAR_1
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., nan])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
$OUT_SCALAR_1
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
$BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
$OUT_SCALAR_2
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
$BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
$OUT_SCALAR_2
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
$OUT_SCALAR_1
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
Boolean result of the logical AND operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False])
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False])
The ``&`` operator can be used as a shorthand for ``np.logical_and`` on
boolean ndarrays.
>>> a = np.array([True, False])
>>> b = np.array([False, False])
>>> a & b
array([False, False])
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
$OUT_SCALAR_1
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True])
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
Boolean result of the logical OR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False])
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True])
The ``|`` operator can be used as a shorthand for ``np.logical_or`` on
boolean ndarrays.
>>> a = np.array([True, False])
>>> b = np.array([False, False])
>>> a | b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True])
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]])
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and return a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and return a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and return a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([ 2., 5., 4.])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and return a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'clip',
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Equivalent to but faster than ``np.minimum(np.maximum(a, a_min), a_max)``.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : array_like
Minimum value.
a_max : array_like
Maximum value.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
$PARAMS
See Also
--------
numpy.clip :
Wrapper that makes the ``a_min`` and ``a_max`` arguments optional,
dispatching to one of `~numpy.core.umath.clip`,
`~numpy.core.umath.minimum`, and `~numpy.core.umath.maximum`.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
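Examples
--------
These examples use the public `numpy.clip` wrapper, which dispatches to this
ufunc when both ``a_min`` and ``a_max`` are given:
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> np.clip(a, 3, 6)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])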
""")
add_newdoc('numpy.core.umath', 'matmul',
"""
Matrix product of two arrays.
Parameters
----------
x1, x2 : array_like
Input arrays, scalars not allowed.
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not
provided or None, a freshly-allocated array is returned.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
.. versionadded:: 1.16
Now handles ufunc kwargs
Returns
-------
y : ndarray
The matrix product of the inputs.
This is a scalar only when both x1, x2 are 1-d vectors.
Raises
------
ValueError
If the last dimension of `x1` is not the same size as
the second-to-last dimension of `x2`.
If a scalar value is passed in.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
dot : alternative matrix product with different broadcasting rules.
Notes
-----
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional
matrices.
- If either argument is N-D, N > 2, it is treated as a stack of
matrices residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by
prepending a 1 to its dimensions. After matrix multiplication
the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by
appending a 1 to its dimensions. After matrix multiplication
the appended 1 is removed.
``matmul`` differs from ``dot`` in two important ways:
- Multiplication by scalars is not allowed, use ``*`` instead.
- Stacks of matrices are broadcast together as if the matrices
were elements, respecting the signature ``(n,k),(k,m)->(n,m)``:
>>> a = np.ones([9, 5, 7, 4])
>>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
(9, 5, 7, 9, 5, 3)
>>> np.matmul(a, c).shape
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
The matmul function implements the semantics of the ``@`` operator introduced
in Python 3.5 following :pep:`465`.
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([[4, 1],
... [2, 2]])
>>> np.matmul(a, b)
array([[4, 1],
[2, 2]])
For 2-D mixed with 1-D, the result is the usual matrix-vector (or vector-matrix) product.
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([1, 2])
>>> np.matmul(a, b)
array([1, 2])
>>> np.matmul(b, a)
array([1, 2])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
>>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
>>> np.matmul(a,b).shape
(2, 2, 2)
>>> np.matmul(a, b)[0, 1, 1]
98
>>> sum(a[0, 1, :] * b[0 , :, 1])
98
Vector, vector returns the scalar inner product, but neither argument
is complex-conjugated:
>>> np.matmul([2j, 3j], [2j, 3j])
(-13+0j)
Scalar multiplication raises an error.
>>> np.matmul([1,2], 3)
Traceback (most recent call last):
...
ValueError: matmul: Input operand 1 does not have enough dimensions ...
The ``@`` operator can be used as a shorthand for ``np.matmul`` on
ndarrays.
>>> x1 = np.array([2j, 3j])
>>> x2 = np.array([2j, 3j])
>>> x1 @ x2
(-13+0j)
.. versionadded:: 1.10.0
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y1 : ndarray
Fractional part of `x`.
$OUT_SCALAR_1
y2 : ndarray
Integral part of `x`.
$OUT_SCALAR_1
Notes
-----
For integer input the return values are floats.
See Also
--------
divmod : ``divmod(x, 1)`` is equivalent to ``modf`` with the return values
switched, except it always has a positive remainder.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0.0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
The ``*`` operator can be used as a shorthand for ``np.multiply`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 * x2
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
$PARAMS
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
$OUT_SCALAR_1
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
The unary ``-`` operator can be used as a shorthand for ``np.negative`` on
ndarrays.
>>> x1 = np.array(([1., -1.]))
>>> -x1
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'positive',
"""
Numerical positive, element-wise.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = +x`.
$OUT_SCALAR_1
Notes
-----
Equivalent to `x.copy()`, but only defined for types that support
arithmetic.
Examples
--------
>>> x1 = np.array(([1., -1.]))
>>> np.positive(x1)
array([ 1., -1.])
The unary ``+`` operator can be used as a shorthand for ``np.positive`` on
ndarrays.
>>> x1 = np.array(([1., -1.]))
>>> +x1
array([ 1., -1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True])
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]])
The ``!=`` operator can be used as a shorthand for ``np.not_equal`` on
ndarrays.
>>> a = np.array([1., 2.])
>>> b = np.array([1., 3.])
>>> a != b
array([False, True])
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be the numpy.ones_like, but now a specific
function for that has been written for consistency with the other
*_like functions. It is only used internally in a limited fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape. Note that an
integer type raised to a negative integer power will raise a ValueError.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
float_power : power function that promotes integers to float
Examples
--------
Cube each element in an array.
>>> x1 = np.arange(6)
>>> x1
array([0, 1, 2, 3, 4, 5])
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
The ``**`` operator can be used as a shorthand for ``np.power`` on
ndarrays.
>>> x2 = np.array([1, 2, 3, 3, 2, 1])
>>> x1 = np.arange(6)
>>> x1 ** x2
array([ 0, 1, 8, 27, 16, 5])
""")
add_newdoc('numpy.core.umath', 'float_power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in `x2`.
`x1` and `x2` must be broadcastable to the same shape. This differs from
the power function in that integers, float16, and float32 are promoted to
floats with a minimum precision of float64 so that the result is always
inexact. The intent is that the function will return a usable result for
negative powers and seldom overflow for positive powers.
.. versionadded:: 1.12.0
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
power : power function that preserves type
Examples
--------
Cube each element in a list.
>>> x1 = [0, 1, 2, 3, 4, 5]
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.float_power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.float_power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.float_power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding radian values.
$OUT_SCALAR_1
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in radians.
$OUT_SCALAR_1
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
Return array.
$OUT_SCALAR_1
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
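As noted above, an integer argument with absolute value larger than 1 gives zero:
>>> np.reciprocal(3)
0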
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes the remainder complementary to the `floor_divide` function. It is
equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
as the divisor `x2`. The MATLAB function equivalent to ``np.remainder``
is ``mod``.
.. warning::
This should not be confused with:
* Python 3.7's `math.remainder` and C's ``remainder``, which
compute the IEEE remainder, which is the complement to
``round(x1 / x2)``.
* The MATLAB ``rem`` function and/or the C ``%`` operator, which is the
complement to ``int(x1 / x2)``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent of Python ``//`` operator.
divmod : Simultaneous floor division and remainder.
fmod : Equivalent of the MATLAB ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
``mod`` is an alias of ``remainder``.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
The ``%`` operator can be used as a shorthand for ``np.remainder`` on
ndarrays.
>>> x1 = np.arange(7)
>>> x1 % 5
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'divmod',
"""
Return element-wise quotient and remainder simultaneously.
.. versionadded:: 1.13.0
``np.divmod(x, y)`` is equivalent to ``(x // y, x % y)``, but faster
because it avoids redundant work. It is used to implement the Python
built-in function ``divmod`` on NumPy arrays.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out1 : ndarray
Element-wise quotient resulting from floor division.
$OUT_SCALAR_2
out2 : ndarray
Element-wise remainder from floor division.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent to Python's ``//`` operator.
remainder : Equivalent to Python's ``%`` operator.
modf : Equivalent to ``divmod(x, 1)`` for positive ``x`` with the return
values switched.
Examples
--------
>>> np.divmod(np.arange(5), 3)
(array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))
The `divmod` function can be used as a shorthand for ``np.divmod`` on
ndarrays.
>>> x = np.arange(5)
>>> divmod(x, 3)
(array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by `x2` places. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
$OUT_SCALAR_2
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
The ``>>`` operator can be used as a shorthand for ``np.right_shift`` on
ndarrays.
>>> x1 = 10
>>> x2 = np.array([1,2,3])
>>> x1 >> x2
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
$OUT_SCALAR_1
See Also
--------
fix, ceil, floor, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan
is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The sign of `x`.
$OUT_SCALAR_1
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
$PARAMS
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
$OUT_SCALAR_1
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False])
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The next representable values of `x1` in the direction of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True])
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x : array_like
Values to find the spacing of.
$PARAMS
Returns
-------
out : ndarray or scalar
The spacing of values of `x`.
$OUT_SCALAR_1
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
$PARAMS
Returns
-------
y : array_like
The sine of each element of x.
$OUT_SCALAR_1
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
$PARAMS
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., nan, inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
$PARAMS
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the
cube-root of each element in `x`.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise `x*x`, of the same shape and dtype as `x`.
$OUT_SCALAR_1
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
The ``-`` operator can be used as a shorthand for ``np.subtract`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 - x2
array([[0., 0., 0.],
[3., 3., 3.],
[6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
https://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of Python's traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
$OUT_SCALAR_2
Notes
-----
In Python, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
The ``/`` operator can be used as a shorthand for ``np.true_divide`` on
ndarrays.
>>> x = np.arange(5)
>>> x / 4
array([0. , 0.25, 0.5 , 0.75, 1. ])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
The mantissa lies in the open interval (-1, 1), while the twos
exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
$PARAMS
Returns
-------
mantissa : ndarray
Floating values between -1 and 1.
$OUT_SCALAR_1
exponent : ndarray
Integer exponents of 2.
$OUT_SCALAR_1
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
$OUT_SCALAR_2
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`; if used by itself it is
clearer to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float16)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
add_newdoc('numpy.core.umath', 'gcd',
"""
Returns the greatest common divisor of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : array_like, int
Arrays of values.
$BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The greatest common divisor of the absolute value of the inputs
$OUT_SCALAR_2
See Also
--------
lcm : The lowest common multiple
Examples
--------
>>> np.gcd(12, 20)
4
>>> np.gcd.reduce([15, 25, 35])
5
>>> np.gcd(np.arange(6), 20)
array([20, 1, 2, 1, 4, 5])
""")
add_newdoc('numpy.core.umath', 'lcm',
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : array_like, int
Arrays of values.
$BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs
$OUT_SCALAR_2
See Also
--------
gcd : The greatest common divisor
Examples
--------
>>> np.lcm(12, 20)
60
>>> np.lcm.reduce([3, 12, 20])
60
>>> np.lcm.reduce([40, 12, 20])
120
>>> np.lcm(np.arange(6), 20)
array([ 0, 20, 20, 60, 20, 20])
""")
| bsd-3-clause |
dessn/sn-bhm | dessn/general/pecvelcor.py | 1 | 6988 | import numpy as np
import matplotlib.pyplot as plt
'''
Functions to derive the uncertainty associated with correcting observed redshifts in the CMB frame
for peculiar motion. Which function(s) is/are needed depends on the approach of the cosmological fit.
The simplest solution is to propagate the redshift uncertainty from get_sigma_redshift_pecvel() as
the uncertainty is constant in redshift. If that is not possible the uncertainty can be shifted
to the distance modulus (or whatever magnitude you have in your code, possibly m_B). For magnitudes
however the uncertainty is not constant, but decreases with redshift (corresponding to the slope of
the mu vs redshift plot). The function to call is then get_sigma_mu_pecvel() which takes as input a
numpy-like array that contains the cosmological redshifts of the SNe (z_Hd or zcor) and returns a
numpy array with the magnitude uncertainties for each redshift to propagate. If you are working
with the full covariance matrix this array is added to the diagonal of that covariance matrix.
IMPORTANT: If you are already propagating an uncertainty from the linear theory parameter beta,
which scales linearly with the peculiar velocities, set sigma_lineartheory to zero.
For further details see: https://www.overleaf.com/read/bzpnwnmcqhfj
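A minimal usage sketch (illustrative only; the redshift values are placeholders):
>>> import numpy as np
>>> sigma_mu = get_sigma_mu_pecvel(np.array([0.01, 0.05, 0.1]))
The entries of sigma_mu are then added to the diagonal of the covariance matrix
(or propagated as per-SN magnitude uncertainties), as described above.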
'''
def get_sigma_redshift_pecvel(sigma_lineartheory=150., sigma_shotnoise=125., sigma_missingdata=100.):
'''Return the redshift uncertainty associated with correcting redshift for
peculiar motion.
Keyword arguments:
sigma_lineartheory -- uncertainty from limits of linear theory (default 150.0)
sigma_shotnoise -- uncertainty from shotnoise in 2M++ (default 125.0)
sigma_missingdata -- uncertainty from missing data in 2M++ (default 100.0)
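With the default inputs this evaluates to sqrt(150**2 + 125**2 + 100**2) / 2.99792e5,
i.e. about 7.3e-4 (a combined peculiar-velocity uncertainty of roughly 219 km/s).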
'''
assert isinstance(sigma_lineartheory, float), 'Expected float but found %s' % type(sigma_lineartheory)
assert isinstance(sigma_shotnoise, float), 'Expected float but found %s' % type(sigma_shotnoise)
assert isinstance(sigma_missingdata, float), 'Expected float but found %s' % type(sigma_missingdata)
sigma_redshift_pecvel = np.sqrt(sigma_lineartheory ** 2 + sigma_shotnoise ** 2 + sigma_missingdata ** 2) / 2.99792e5
return sigma_redshift_pecvel
def get_sigma_mu_pecvel(redshifts, sigma_lineartheory=150., sigma_shotnoise=125., sigma_missingdata=100.):
'''Return the magnitude uncertainty associated with correcting redshift for
peculiar motion.
Keyword arguments:
redshifts -- array of cosmological redshifts in CMB frame
sigma_lineartheory -- uncertainty from limits of linear theory (default 150.0)
sigma_shotnoise -- uncertainty from shotnoise in 2M++ (default 125.0)
sigma_missingdata -- uncertainty from missing data in 2M++ (default 100.0)
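Example (approximate, with the default uncertainties):
get_sigma_mu_pecvel(np.array([0.01, 0.05, 0.1])) ~ [0.161, 0.034, 0.018]
These match the reference values plotted as crosses in the visual test in the
__main__ block below.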
'''
assert isinstance(redshifts, np.ndarray), 'Expected numpy array but found %s' % type(redshifts)
assert np.min(redshifts) > 0., 'Zero or negative redshift in input array!'
sigma_redshift_pecvel = get_sigma_redshift_pecvel(sigma_lineartheory=sigma_lineartheory,
sigma_shotnoise=sigma_shotnoise,
sigma_missingdata=sigma_missingdata)
sigma_mu_pecvel = sigma_redshift_pecvel * 5. / np.log(10.) * (1. + redshifts) ** 2 / (
redshifts * (1 + 0.5 * redshifts))
return sigma_mu_pecvel
if __name__ == '__main__':
# Some very basic tests
# Type test
try:
sigma_redshift_pecvel = get_sigma_redshift_pecvel()
if type(sigma_redshift_pecvel) == np.float64:
pass
else:
print("get_sigma_redshift_pecvel() type test failed, did not return a np.float64!")
except:
pass
# Assertion test
try:
sigma_redshift_pecvel = get_sigma_redshift_pecvel(sigma_lineartheory=[])
if type(sigma_redshift_pecvel) == np.float64:
pass
else:
print("get_sigma_redshift_pecvel() type test failed, did not return a np.float64!")
except AssertionError:
pass
except:
print("get_sigma_redshift_pecvel() assertion test failed, did not raise AssertionError!")
# Assertion test
try:
sigma_redshift_pecvel = get_sigma_redshift_pecvel(sigma_missingdata=[])
if type(sigma_redshift_pecvel) == np.float64:
pass
else:
print("get_sigma_redshift_pecvel() type test failed, did not return a np.float64!")
except AssertionError:
pass
except:
print("get_sigma_redshift_pecvel() assertion test failed, did not raise AssertionError!")
# Assertion test
try:
sigma_redshift_pecvel = get_sigma_redshift_pecvel(sigma_shotnoise=[])
if type(sigma_redshift_pecvel) == np.float64:
pass
else:
print("get_sigma_redshift_pecvel() type test failed, did not return a np.float64!")
except AssertionError:
pass
except:
print("get_sigma_redshift_pecvel() assertion test failed, did not raise AssertionError!")
### Function: get_sigma_mu_pecvel()
# Output type test
try:
if type(get_sigma_mu_pecvel(np.array([0.1]))) == np.ndarray:
pass
else:
print("get_sigma_mu_pecvel() type test failed, did not return numpy array!")
except:
pass
# Assertion test
try:
get_sigma_mu_pecvel(np.array([0.1, -0.1, 2.]))
except AssertionError:
pass
except:
print("get_sigma_mu_pecvel() assertion test failed, did not raise AssertionError!")
# Assertion test
try:
get_sigma_mu_pecvel(np.array([0.1, 0.1, 2., 0.]))
except AssertionError:
pass
except:
print("get_sigma_mu_pecvel() assertion test failed, did not raise AssertionError!")
# Assertion test
try:
get_sigma_mu_pecvel(1.)
except AssertionError:
pass
except:
print("get_sigma_mu_pecvel() assertion test failed, did not raise AssertionError!")
# Assertion test
try:
get_sigma_mu_pecvel([0.1, 0.2, 0.3])
except AssertionError:
pass
except:
print("get_sigma_mu_pecvel() assertion test failed, did not raise AssertionError!")
# This produces a plot to visually test the output of get_sigma_mu_pecvel()
redshifts = np.linspace(0.001, 0.1, 100)
sigma_mu_pecvel = get_sigma_mu_pecvel(redshifts)
plt.figure()
plt.xlim((0., .1))
plt.ylim((0, 0.2))
plt.xlabel('Cosmological redshift', size='large')
plt.ylabel(r'$\sigma_{\mu ,pec}$', size='x-large')
plt.plot([0.01, 0.05, 0.1], [.1612858744, 0.03418254355, 0.01831115415], 'kx')
plt.plot(redshifts, sigma_mu_pecvel, 'k')
plt.plot(redshifts, get_sigma_mu_pecvel(redshifts, sigma_lineartheory=300.), 'b')
plt.plot([0.02, 0.05, 0.1], [.1268554479, 0.05298401655, 0.02838286428], 'bx')
plt.text(0.02, 0.175,
'Visual test: Black crosses should intersect the black line, \n ditto with blue crosses and line')
plt.show()
| mit |
giopastor/moose | examples/ex14_pps/plot.py | 14 | 1194 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import csv
# Python 2.7 does not have str.isnumeric()?
def isInt(string):
try:
int(string)
return True
except ValueError:
return False
# Format of the CSV file is:
# time,dofs,integral
# 1,221,2.3592493758695,
# 2,841,0.30939803328432,
# 3,3281,0.088619511656913,
# 4,12961,0.022979021365857,
# 5,51521,0.0057978748995635,
# 6,205441,0.0014528130907967,
reader = csv.reader(open('out.csv'))
dofs = []
errs = []
for row in reader:
if row and isInt(row[0]): # Skip rows that don't start with numbers.
dofs.append(int(row[1]))
errs.append(float(row[2]))
# Construct data to be plotted
xdata = np.log10(np.sqrt(dofs))
ydata = np.log10(errs)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(xdata, ydata, 'bo-')
ax1.set_xlabel('log (1/h)')
ax1.set_ylabel('log (L2-error)')
# Create linear curve fits of the data, but just the last couple data
# point when we are in the asymptotic regime.
fit = np.polyfit(xdata[2:-1], ydata[2:-1], 1)
fit_msg = 'Slope ~ ' + '%.2f' % fit[0]
ax1.text(2.0, -1.0, fit_msg)
plt.savefig('plot.pdf', format='pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
h2oai/h2o-3 | h2o-py/h2o/h2o.py | 2 | 117595 | # -*- encoding: utf-8 -*-
"""
h2o -- module for using H2O services.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import subprocess
import tempfile
import warnings
import webbrowser
from .backend import H2OConnection
from .backend import H2OConnectionConf
from .backend import H2OLocalServer
from .base import Keyed
from .estimators import create_estimator
from .estimators.generic import H2OGenericEstimator
from .exceptions import H2OConnectionError, H2OValueError, H2OError, H2ODeprecationWarning
from .estimators.gbm import H2OGradientBoostingEstimator
from .estimators.glm import H2OGeneralizedLinearEstimator
from .estimators.glrm import H2OGeneralizedLowRankEstimator
from .estimators.kmeans import H2OKMeansEstimator
from .estimators.naive_bayes import H2ONaiveBayesEstimator
from .estimators.pca import H2OPrincipalComponentAnalysisEstimator
from .estimators.random_forest import H2ORandomForestEstimator
from .estimators.stackedensemble import H2OStackedEnsembleEstimator
from .estimators.word2vec import H2OWord2vecEstimator
from .estimators.isolation_forest import H2OIsolationForestEstimator
from .transforms.decomposition import H2OSVD
from .estimators.xgboost import H2OXGBoostEstimator
from .estimators.deeplearning import H2OAutoEncoderEstimator, H2ODeepLearningEstimator
from .estimators.extended_isolation_forest import H2OExtendedIsolationForestEstimator
from .expr import ExprNode
from .frame import H2OFrame
from .grid.grid_search import H2OGridSearch
from .job import H2OJob
from .model.model_base import ModelBase
from .utils.compatibility import * # NOQA
from .utils.config import H2OConfigReader
from .utils.metaclass import deprecated_fn
from .utils.shared_utils import check_frame_id, gen_header, py_tmp_key, quoted
from .utils.typechecks import assert_is_type, assert_satisfies, BoundInt, BoundNumeric, I, is_type, numeric, U
# enable h2o deprecation warnings by default to ensure that users get notified in interactive mode, without being too annoying
warnings.filterwarnings("once", category=H2ODeprecationWarning)
# An IPython deprecation warning is triggered after h2o.init(). Remove this once the deprecation has been resolved
# warnings.filterwarnings('ignore', category=DeprecationWarning, module='.*/IPython/.*')
h2oconn = None # type: H2OConnection
def connect(server=None, url=None, ip=None, port=None,
https=None, verify_ssl_certificates=None, cacert=None,
auth=None, proxy=None, cookies=None, verbose=True, config=None):
"""
Connect to an existing H2O server, remote or local.
There are two ways to connect to a server: either pass a `server` parameter containing an instance of
an H2OLocalServer, or specify `ip` and `port` of the server that you want to connect to.
:param server: An H2OLocalServer instance to connect to (optional).
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param https: Set to True to connect via https:// instead of http://.
:param verify_ssl_certificates: When using https, setting this to False will disable SSL certificates verification.
:param cacert: Path to a CA bundle file or a directory with certificates of trusted CAs (optional).
:param auth: Either a (username, password) pair for basic authentication, an instance of h2o.auth.SpnegoAuth
or one of the requests.auth authenticator objects.
:param proxy: Proxy server address.
:param cookies: Cookie (or list of) to add to request
:param verbose: Set to False to disable printing connection status messages.
:param config: Connection configuration object encapsulating connection parameters.
:returns: the new :class:`H2OConnection` object.
:examples:
>>> import h2o
>>> ipA = "127.0.0.1"
>>> portN = "54321"
>>> urlS = "http://127.0.0.1:54321"
>>> connect_type=h2o.connect(ip=ipA, port=portN, verbose=True)
# or
>>> connect_type2 = h2o.connect(url=urlS, https=True, verbose=True)
"""
global h2oconn
if config:
if "connect_params" in config:
h2oconn = _connect_with_conf(config["connect_params"])
else:
h2oconn = _connect_with_conf(config)
else:
h2oconn = H2OConnection.open(server=server, url=url, ip=ip, port=port, https=https,
auth=auth, verify_ssl_certificates=verify_ssl_certificates, cacert=cacert,
proxy=proxy, cookies=cookies,
verbose=verbose)
if verbose:
h2oconn.cluster.show_status()
return h2oconn
def api(endpoint, data=None, json=None, filename=None, save_to=None):
"""
Perform a REST API request to a previously connected server.
This function is mostly for internal purposes, but may occasionally be useful for direct access to
the backend H2O server. It has the same parameters as :meth:`H2OConnection.request <h2o.backend.H2OConnection.request>`.
:examples:
>>> res = h2o.api("GET /3/NetworkTest")
>>> res["table"].show()
"""
# type checks are performed in H2OConnection class
_check_connection()
return h2oconn.request(endpoint, data=data, json=json, filename=filename, save_to=save_to)
def connection():
"""Return the current :class:`H2OConnection` handler.
:examples:
>>> temp = h2o.connection()
>>> temp
"""
return h2oconn
def version_check():
"""Used to verify that h2o-python module and the H2O server are compatible with each other."""
from .__init__ import __version__ as ver_pkg
ci = h2oconn.cluster
if not ci:
raise H2OConnectionError("Connection not initialized. Did you run h2o.connect()?")
ver_h2o = ci.version
if ver_pkg == "SUBST_PROJECT_VERSION": ver_pkg = "UNKNOWN"
if str(ver_h2o) != str(ver_pkg):
branch_name_h2o = ci.branch_name
build_number_h2o = ci.build_number
if build_number_h2o is None or build_number_h2o == "unknown":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, ver_pkg))
elif build_number_h2o == "99999":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"This is a developer build, please contact your developer."
"".format(ver_h2o, ver_pkg))
else:
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Install the matching h2o-Python version from - "
"http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
"".format(ver_h2o, ver_pkg, branch_name_h2o, build_number_h2o))
# Check age of the install
if ci.build_too_old:
print("Warning: Your H2O cluster version is too old ({})! Please download and install the latest "
"version from http://h2o.ai/download/".format(ci.build_age))
def init(url=None, ip=None, port=None, name=None, https=None, cacert=None, insecure=None, username=None, password=None,
cookies=None, proxy=None, start_h2o=True, nthreads=-1, ice_root=None, log_dir=None, log_level=None,
max_log_file_size=None, enable_assertions=True, max_mem_size=None, min_mem_size=None, strict_version_check=None,
ignore_config=False, extra_classpath=None, jvm_custom_args=None, bind_to_localhost=True, **kwargs):
"""
Attempt to connect to a local server, or if not successful start a new server and connect to it.
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param name: Cluster name. If None while connecting to an existing cluster, the cluster name is not checked.
If set, the client will connect only if the target cluster name matches. If no instance is found and a local
one is started, this will be used as its cluster name, or a random one will be generated if set to None.
:param https: Set to True to connect via https:// instead of http://.
:param cacert: Path to a CA bundle file or a directory with certificates of trusted CAs (optional).
:param insecure: When using https, setting this to True will disable SSL certificates verification.
    :param username: Username for basic authentication.
    :param password: Password for basic authentication.
:param cookies: Cookie (or list of) to add to each request.
:param proxy: Proxy server address.
    :param start_h2o: If False, do not attempt to start an H2O server when the connection to an existing one fails.
:param nthreads: "Number of threads" option when launching a new h2o server.
:param ice_root: Directory for temporary files for the new h2o server.
:param log_dir: Directory for H2O logs to be stored if a new instance is started. Ignored if connecting to an existing node.
:param log_level: The logger level for H2O if a new instance is started. One of TRACE,DEBUG,INFO,WARN,ERRR,FATA. Default is INFO. Ignored if connecting to an existing node.
:param max_log_file_size: Maximum size of INFO and DEBUG log files. The file is rolled over after a specified size has been reached. (The default is 3MB. Minimum is 1MB and maximum is 99999MB)
:param enable_assertions: Enable assertions in Java for the new h2o server.
:param max_mem_size: Maximum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
- **Note:** If `max_mem_size` is not defined, then the amount of memory that H2O allocates will be determined by the default memory of the Java Virtual Machine (JVM). This amount depends on the Java version, but it will generally be 25% of the machine's physical memory.
:param min_mem_size: Minimum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param strict_version_check: If True, an error will be raised if the client and server versions don't match.
:param ignore_config: Indicates whether a processing of a .h2oconfig file should be conducted or not. Default value is False.
:param extra_classpath: List of paths to libraries that should be included on the Java classpath when starting H2O from Python.
    :param jvm_custom_args: Custom, user-defined arguments for the JVM in which H2O is instantiated. Ignored if there is an instance of H2O already running and the client connects to it.
    :param bind_to_localhost: A flag indicating whether access to the H2O instance should be restricted to the local machine (default) or if it can be reached from other computers on the network.
    :param kwargs: (all other deprecated attributes)
:examples:
>>> import h2o
>>> h2o.init(ip="localhost", port=54323)
"""
global h2oconn
assert_is_type(url, str, None)
assert_is_type(ip, str, None)
assert_is_type(port, int, str, None)
assert_is_type(name, str, None)
assert_is_type(https, bool, None)
assert_is_type(insecure, bool, None)
assert_is_type(username, str, None)
assert_is_type(password, str, None)
assert_is_type(cookies, str, [str], None)
assert_is_type(proxy, {str: str}, None)
assert_is_type(start_h2o, bool, None)
assert_is_type(nthreads, int)
assert_is_type(ice_root, str, None)
assert_is_type(log_dir, str, None)
assert_is_type(log_level, str, None)
assert_satisfies(log_level, log_level in [None, "TRACE", "DEBUG", "INFO", "WARN", "ERRR", "FATA"])
assert_is_type(enable_assertions, bool)
assert_is_type(max_mem_size, int, str, None)
assert_is_type(min_mem_size, int, str, None)
assert_is_type(strict_version_check, bool, None)
assert_is_type(extra_classpath, [str], None)
assert_is_type(jvm_custom_args, [str], None)
assert_is_type(bind_to_localhost, bool)
assert_is_type(kwargs, {"proxies": {str: str}, "max_mem_size_GB": int, "min_mem_size_GB": int,
"force_connect": bool, "as_port": bool})
def get_mem_size(mmint, mmgb):
if not mmint: # treat 0 and "" as if they were None
if mmgb is None: return None
return mmgb << 30
if is_type(mmint, int):
# If the user gives some small number just assume it's in Gigabytes...
if mmint < 1000: return mmint << 30
return mmint
if is_type(mmint, str):
last = mmint[-1].upper()
num = mmint[:-1]
if not (num.isdigit() and last in "MGT"):
raise H2OValueError("Wrong format for a *_memory_size argument: %s (should be a number followed by "
"a suffix 'M', 'G' or 'T')" % mmint)
if last == "T": return int(num) << 40
if last == "G": return int(num) << 30
if last == "M": return int(num) << 20
scheme = "https" if https else "http"
proxy = proxy[scheme] if proxy is not None and scheme in proxy else \
kwargs["proxies"][scheme] if "proxies" in kwargs and scheme in kwargs["proxies"] else None
mmax = get_mem_size(max_mem_size, kwargs.get("max_mem_size_GB"))
mmin = get_mem_size(min_mem_size, kwargs.get("min_mem_size_GB"))
auth = (username, password) if username and password else None
check_version = True
verify_ssl_certificates = not insecure
# Apply the config file if ignore_config=False
if not ignore_config:
config = H2OConfigReader.get_config()
if url is None and ip is None and port is None and https is None and "init.url" in config:
url = config["init.url"]
if proxy is None and "init.proxy" in config:
proxy = config["init.proxy"]
if cookies is None and "init.cookies" in config:
cookies = config["init.cookies"].split(";")
if auth is None and "init.username" in config and "init.password" in config:
auth = (config["init.username"], config["init.password"])
if strict_version_check is None:
if "init.check_version" in config:
check_version = config["init.check_version"].lower() != "false"
elif os.environ.get("H2O_DISABLE_STRICT_VERSION_CHECK"):
check_version = False
else:
check_version = strict_version_check
    # Note: `verify_ssl_certificates` is never None at this point => use `insecure` to check for None/default input
if insecure is None and "init.verify_ssl_certificates" in config:
verify_ssl_certificates = config["init.verify_ssl_certificates"].lower() != "false"
if cacert is None:
if "init.cacert" in config:
cacert = config["init.cacert"]
assert_is_type(verify_ssl_certificates, bool)
if not start_h2o:
print("Warning: if you don't want to start local H2O server, then use of `h2o.connect()` is preferred.")
try:
h2oconn = H2OConnection.open(url=url, ip=ip, port=port, name=name, https=https,
verify_ssl_certificates=verify_ssl_certificates, cacert=cacert,
auth=auth, proxy=proxy,cookies=cookies, verbose=True,
_msgs=("Checking whether there is an H2O instance running at {url} ",
"connected.", "not found."))
except H2OConnectionError:
# Backward compatibility: in init() port parameter really meant "baseport" when starting a local server...
if port and not str(port).endswith("+") and not kwargs.get("as_port", False):
port = str(port) + "+"
if not start_h2o: raise
if ip and not (ip == "localhost" or ip == "127.0.0.1"):
raise H2OConnectionError('Can only start H2O launcher if IP address is localhost.')
if https:
raise H2OConnectionError('Starting local server is not available with https enabled. You may start local'
' instance of H2O with https manually '
'(https://docs.h2o.ai/h2o/latest-stable/h2o-docs/welcome.html#new-user-quick-start).')
hs = H2OLocalServer.start(nthreads=nthreads, enable_assertions=enable_assertions, max_mem_size=mmax,
min_mem_size=mmin, ice_root=ice_root, log_dir=log_dir, log_level=log_level,
max_log_file_size=max_log_file_size, port=port, name=name,
extra_classpath=extra_classpath, jvm_custom_args=jvm_custom_args,
bind_to_localhost=bind_to_localhost)
h2oconn = H2OConnection.open(server=hs, https=https, verify_ssl_certificates=verify_ssl_certificates,
cacert=cacert, auth=auth, proxy=proxy,cookies=cookies, verbose=True)
if check_version:
version_check()
h2oconn.cluster.timezone = "UTC"
h2oconn.cluster.show_status()
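# Illustrative sketch (values are assumptions for the example): starting a local server with
# explicit memory bounds and a relaxed version check.
def _sketch_init_usage():
    init(max_mem_size="4G",           # parsed by get_mem_size above
         min_mem_size="1G",
         nthreads=2,
         strict_version_check=False)  # tolerate a minor client/server version skew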
def resume(recovery_dir=None):
"""
    Triggers auto-recovery resume - this will look into the configured recovery dir and resume any
    tasks that were interrupted by unexpected cluster stopping.
    :param recovery_dir: A path to where cluster recovery data is stored; if blank, the cluster's configuration will be used.
"""
params = {
"recovery_dir": recovery_dir
}
api(endpoint="POST /3/Recovery/resume", data=params)
def lazy_import(path, pattern=None):
"""
Import a single file or collection of files.
:param path: A path to a data file (remote or local).
:param pattern: Character string containing a regular expression to match file(s) in the folder.
:returns: either a :class:`H2OFrame` with the content of the provided file, or a list of such frames if
importing multiple files.
:examples:
>>> iris = h2o.lazy_import("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
"""
assert_is_type(path, str, [str])
assert_is_type(pattern, str, None)
paths = [path] if is_type(path, str) else path
return _import_multi(paths, pattern)
def _import_multi(paths, pattern):
assert_is_type(paths, [str])
assert_is_type(pattern, str, None)
j = api("POST /3/ImportFilesMulti", {"paths": paths, "pattern": pattern})
if j["fails"]: raise ValueError("ImportFiles of '" + ".".join(paths) + "' failed on " + str(j["fails"]))
return j["destination_frames"]
def upload_file(path, destination_frame=None, header=0, sep=None, col_names=None, col_types=None,
na_strings=None, skipped_columns=None, quotechar=None, escapechar=None):
"""
Upload a dataset from the provided local path to the H2O cluster.
    Does a single-threaded push to H2O. Also see :func:`import_file`.
:param path: A path specifying the location of the data to upload.
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
be automatically generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
        None will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
    :param skipped_columns: a list of integer column indices to skip; these columns are not parsed into the final frame from the import file.
:param quotechar: A hint for the parser which character to expect as quoting character. Only single quote, double quote or None (default) are allowed. None means automatic detection.
:param escapechar: (Optional) One ASCII character used to escape other characters.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> iris_df = h2o.upload_file("~/Desktop/repos/h2o-3/smalldata/iris/iris.csv")
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(path, str)
assert_is_type(destination_frame, str, None)
assert_is_type(header, -1, 0, 1)
assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
assert_is_type(col_names, [str], None)
assert_is_type(col_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
assert_is_type(quotechar, None, U("'", '"'))
    assert (skipped_columns is None) or isinstance(skipped_columns, list), \
        "The skipped_columns should be a list of column indices!"
assert_is_type(escapechar, None, I(str, lambda s: len(s) == 1))
check_frame_id(destination_frame)
if path.startswith("~"):
path = os.path.expanduser(path)
return H2OFrame()._upload_parse(path, destination_frame, header, sep, col_names, col_types, na_strings, skipped_columns,
quotechar, escapechar)
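# Illustrative sketch (the path and column names are assumptions for the example): uploading a
# local CSV while forcing a couple of column types and declaring missing-value strings.
def _sketch_upload_file_usage():
    return upload_file("/tmp/example.csv",
                       destination_frame="example.hex",
                       header=1,
                       col_types={"id": "string", "label": "enum"},
                       na_strings=["NA", ""])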
def import_file(path=None, destination_frame=None, parse=True, header=0, sep=None, col_names=None, col_types=None,
na_strings=None, pattern=None, skipped_columns=None, custom_non_data_line_markers=None,
partition_by=None, quotechar=None, escapechar=None):
"""
Import a dataset that is already on the cluster.
The path to the data must be a valid path for each node in the H2O cluster. If some node in the H2O cluster
cannot see the file, then an exception will be thrown by the H2O cluster. Does a parallel/distributed
multi-threaded pull of the data. The main difference between this method and :func:`upload_file` is that
the latter works with local files, whereas this method imports remote files (i.e. files local to the server).
    If you are running the H2O server on your own machine, then both methods behave the same.
:param path: path(s) specifying the location of the data to import or a path to a directory of files to import
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will be
automatically generated.
:param parse: If True, the file should be parsed after import. If False, then a list is returned containing the file path.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
        None will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param pattern: Character string containing a regular expression to match file(s) in the folder if `path` is a
directory.
    :param skipped_columns: a list of integer column indices to skip; these columns are not parsed into the final frame from the import file.
    :param custom_non_data_line_markers: If a line in the imported file starts with any character in the given string it will NOT be imported. An empty string means all lines are imported; None means the default behaviour for the given format is used.
    :param partition_by: Names of the columns the persisted dataset has been partitioned by.
:param quotechar: A hint for the parser which character to expect as quoting character. Only single quote, double quote or None (default) are allowed. None means automatic detection.
:param escapechar: (Optional) One ASCII character used to escape other characters.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> birds = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/birds.csv")
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(path, str, [str])
assert_is_type(pattern, str, None)
assert_is_type(destination_frame, str, None)
assert_is_type(parse, bool)
assert_is_type(header, -1, 0, 1)
assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
assert_is_type(col_names, [str], None)
assert_is_type(col_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
assert_is_type(partition_by, None, [str], str)
assert_is_type(quotechar, None, U("'", '"'))
assert_is_type(escapechar, None, I(str, lambda s: len(s) == 1))
    assert isinstance(skipped_columns, (type(None), list)), "The skipped_columns should be a list of column indices!"
check_frame_id(destination_frame)
patharr = path if isinstance(path, list) else [path]
if any(os.path.split(p)[0] == "~" for p in patharr):
raise H2OValueError("Paths relative to a current user (~) are not valid in the server environment. "
"Please use absolute paths if possible.")
if not parse:
return lazy_import(path, pattern)
else:
return H2OFrame()._import_parse(path, pattern, destination_frame, header, sep, col_names, col_types, na_strings,
skipped_columns, custom_non_data_line_markers, partition_by, quotechar, escapechar)
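# Illustrative sketch (the HDFS directory is an assumption for the example): importing every CSV
# from a directory that is visible to all nodes of the H2O cluster.
def _sketch_import_file_usage():
    return import_file(path="hdfs://namenode/data/events/",
                       pattern=".*\\.csv",
                       header=1)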
def load_grid(grid_file_path, load_params_references=False):
"""
    Load a previously saved grid with all its models from the same folder.
    :param grid_file_path: A string containing the path to the file with the saved grid.
        Grid models are expected to be in the same folder.
    :param load_params_references: If True, will attempt to reload saved objects referenced by grid parameters
        (e.g. training frame, calibration frame); this will fail if the grid was saved without the referenced objects.
:return: An instance of H2OGridSearch
:examples:
>>> from collections import OrderedDict
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators.gbm import H2OGradientBoostingEstimator
>>> train = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
# Run GBM Grid Search
>>> ntrees_opts = [1, 3]
>>> learn_rate_opts = [0.1, 0.01, .05]
>>> hyper_parameters = OrderedDict()
>>> hyper_parameters["learn_rate"] = learn_rate_opts
>>> hyper_parameters["ntrees"] = ntrees_opts
>>> export_dir = pyunit_utils.locate("results")
>>> gs = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params=hyper_parameters)
>>> gs.train(x=list(range(4)), y=4, training_frame=train)
>>> grid_id = gs.grid_id
>>> old_grid_model_count = len(gs.model_ids)
# Save the grid search to the export directory
>>> saved_path = h2o.save_grid(export_dir, grid_id)
>>> h2o.remove_all();
>>> train = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
    # Load the grid search
>>> grid = h2o.load_grid(saved_path)
>>> grid.train(x=list(range(4)), y=4, training_frame=train)
"""
assert_is_type(grid_file_path, str)
response = api(
"POST /3/Grid.bin/import",
{"grid_path": grid_file_path, "load_params_references": load_params_references}
)
return get_grid(response["name"])
def save_grid(grid_directory, grid_id, save_params_references=False, export_cross_validation_predictions=False):
"""
    Export a grid and all of its models into the given folder.
:param grid_directory: A string containing the path to the folder for the grid to be saved to.
:param grid_id: A character string with identification of the Grid in H2O.
:param save_params_references: True if objects referenced by grid parameters
(e.g. training frame, calibration frame) should also be saved.
:param export_cross_validation_predictions: A boolean flag indicating whether the models exported from the grid
should be saved with CV Holdout Frame predictions. Default is not to export the predictions.
:examples:
>>> from collections import OrderedDict
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators.gbm import H2OGradientBoostingEstimator
>>> train = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
# Run GBM Grid Search
>>> ntrees_opts = [1, 3]
>>> learn_rate_opts = [0.1, 0.01, .05]
>>> hyper_parameters = OrderedDict()
>>> hyper_parameters["learn_rate"] = learn_rate_opts
>>> hyper_parameters["ntrees"] = ntrees_opts
>>> export_dir = pyunit_utils.locate("results")
>>> gs = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params=hyper_parameters)
>>> gs.train(x=list(range(4)), y=4, training_frame=train)
>>> grid_id = gs.grid_id
>>> old_grid_model_count = len(gs.model_ids)
# Save the grid search to the export directory
>>> saved_path = h2o.save_grid(export_dir, grid_id)
>>> h2o.remove_all();
>>> train = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
# Load the grid search
>>> grid = h2o.load_grid(saved_path)
>>> grid.train(x=list(range(4)), y=4, training_frame=train)
"""
assert_is_type(grid_directory, str)
assert_is_type(grid_id, str)
assert_is_type(save_params_references, bool)
assert_is_type(export_cross_validation_predictions, bool)
params = {
"grid_directory": grid_directory,
"save_params_references": save_params_references,
"export_cross_validation_predictions": export_cross_validation_predictions
}
api("POST /3/Grid.bin/" + grid_id + "/export", params)
return grid_directory + "/" + grid_id
def import_hive_table(database=None, table=None, partitions=None, allow_multi_format=False):
"""
Import Hive table to H2OFrame in memory.
    Make sure to start H2O with Hive on the classpath. Uses hive-site.xml on the classpath to connect to Hive.
    When the database is specified as a JDBC URL, the Hive JDBC driver is used to obtain table metadata, then
    direct HDFS access is used to import the data.
:param database: Name of Hive database (default database will be used by default), can be also a JDBC URL.
:param table: name of Hive table to import
:param partitions: a list of lists of strings - partition key column values of partitions you want to import.
:param allow_multi_format: enable import of partitioned tables with different storage formats used. WARNING:
this may fail on out-of-memory for tables with a large number of small partitions.
:returns: an :class:`H2OFrame` containing data of the specified Hive table.
:examples:
>>> basic_import = h2o.import_hive_table("default",
... "table_name")
>>> jdbc_import = h2o.import_hive_table("jdbc:hive2://hive-server:10000/default",
... "table_name")
>>> multi_format_enabled = h2o.import_hive_table("default",
... "table_name",
... allow_multi_format=True)
>>> with_partition_filter = h2o.import_hive_table("jdbc:hive2://hive-server:10000/default",
... "table_name",
... [["2017", "02"]])
"""
assert_is_type(database, str, None)
assert_is_type(table, str)
assert_is_type(partitions, [[str]], None)
p = { "database": database, "table": table, "partitions": partitions, "allow_multi_format": allow_multi_format }
j = H2OJob(api("POST /3/ImportHiveTable", data=p), "Import Hive Table").poll()
return get_frame(j.dest_key)
def import_sql_table(connection_url, table, username, password, columns=None, optimize=True,
fetch_mode=None, num_chunks_hint=None):
"""
Import SQL table to H2OFrame in memory.
Assumes that the SQL table is not being updated and is stable.
Runs multiple SELECT SQL queries concurrently for parallel ingestion.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see :func:`import_sql_select`.
    Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle and Microsoft SQL Server.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param table: name of SQL table
:param columns: a list of column names to import from SQL table. Default is to import all columns.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:param num_chunks_hint: Desired number of chunks for the target Frame.
:returns: an :class:`H2OFrame` containing data of the specified SQL table.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> table = "citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_table(conn_url, table, username, password)
"""
assert_is_type(connection_url, str)
assert_is_type(table, str)
assert_is_type(username, str)
assert_is_type(password, str)
assert_is_type(columns, [str], None)
assert_is_type(optimize, bool)
assert_is_type(fetch_mode, str, None)
assert_is_type(num_chunks_hint, int, None)
p = {"connection_url": connection_url, "table": table, "username": username, "password": password,
"fetch_mode": fetch_mode, "num_chunks_hint": num_chunks_hint}
if columns:
p["columns"] = ", ".join(columns)
j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
return get_frame(j.dest_key)
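# Illustrative sketch (the JDBC URL and credentials are assumptions for the example; the JDBC
# driver must be on the H2O classpath): a distributed SQL import of selected columns.
def _sketch_import_sql_table_usage():
    return import_sql_table(connection_url="jdbc:mysql://db-host:3306/mydb?&useSSL=false",
                            table="events",
                            username="reader",
                            password="secret",
                            columns=["id", "ts", "value"],
                            fetch_mode="DISTRIBUTED")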
def import_sql_select(connection_url, select_query, username, password, optimize=True,
use_temp_table=None, temp_table_name=None, fetch_mode=None, num_chunks_hint=None):
"""
Import the SQL table that is the result of the specified SQL query to H2OFrame in memory.
Creates a temporary SQL table from the specified sql_query.
Runs multiple SELECT SQL queries on the temporary table concurrently for parallel ingestion, then drops the table.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see h2o.import_sql_table. Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle
and Microsoft SQL Server.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param select_query: SQL query starting with `SELECT` that returns rows from one or more database tables.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param use_temp_table: whether a temporary table should be created from select_query
:param temp_table_name: name of temporary table to be created from select_query
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:param num_chunks_hint: Desired number of chunks for the target Frame.
:returns: an :class:`H2OFrame` containing data of the specified SQL query.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> select_query = "SELECT bikeid from citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_select(conn_url, select_query,
... username, password)
"""
assert_is_type(connection_url, str)
assert_is_type(select_query, str)
assert_is_type(username, str)
assert_is_type(password, str)
assert_is_type(optimize, bool)
assert_is_type(use_temp_table, bool, None)
assert_is_type(temp_table_name, str, None)
assert_is_type(fetch_mode, str, None)
assert_is_type(num_chunks_hint, int, None)
p = {"connection_url": connection_url, "select_query": select_query, "username": username, "password": password,
"use_temp_table": use_temp_table, "temp_table_name": temp_table_name, "fetch_mode": fetch_mode,
"num_chunks_hint": num_chunks_hint}
j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
return get_frame(j.dest_key)
def parse_setup(raw_frames, destination_frame=None, header=0, separator=None, column_names=None,
column_types=None, na_strings=None, skipped_columns=None, custom_non_data_line_markers=None,
partition_by=None, quotechar=None, escapechar=None):
"""
Retrieve H2O's best guess as to what the structure of the data file is.
During parse setup, the H2O cluster will make several guesses about the attributes of
the data. This method allows a user to perform corrective measures by updating the
    dictionary returned by this method. This dictionary is then fed into `parse_raw` to
produce the H2OFrame instance.
:param raw_frames: a collection of imported file frames
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
automatically be generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param separator: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param column_names: A list of column names for the file. If skipped_columns are specified, only list column names
of columns that are not skipped.
:param column_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
        None will be guessed. If skipped_columns are specified, only list column types of columns that are not skipped.
The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
    :param skipped_columns: a list of integer column indices to skip; these columns are not parsed into the final frame from the import file.
    :param custom_non_data_line_markers: If a line in the imported file starts with any character in the given string it will NOT be imported. An empty string means all lines are imported; None means the default behaviour for the given format is used.
:param partition_by: A list of columns the dataset has been partitioned by. None by default.
:param quotechar: A hint for the parser which character to expect as quoting character. Only single quote, double quote or None (default) are allowed. None means automatic detection.
:param escapechar: (Optional) One ASCII character used to escape other characters.
:returns: a dictionary containing parse parameters guessed by the H2O backend.
:examples:
>>> col_headers = ["ID","CAPSULE","AGE","RACE",
... "DPROS","DCAPS","PSA","VOL","GLEASON"]
>>> col_types=['enum','enum','numeric','enum',
... 'enum','enum','numeric','numeric','numeric']
>>> hex_key = "training_data.hex"
>>> fraw = h2o.import_file(("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip"),
... parse=False)
>>> setup = h2o.parse_setup(fraw,
... destination_frame=hex_key,
... header=1,
... separator=',',
... column_names=col_headers,
... column_types=col_types,
... na_strings=["NA"])
>>> setup
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "long", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(raw_frames, str, [str])
assert_is_type(destination_frame, None, str)
assert_is_type(header, -1, 0, 1)
assert_is_type(separator, None, I(str, lambda s: len(s) == 1))
assert_is_type(column_names, [str], None)
assert_is_type(column_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
assert_is_type(partition_by, None, [str], str)
assert_is_type(quotechar, None, U("'", '"'))
assert_is_type(escapechar, None, I(str, lambda s: len(s) == 1))
check_frame_id(destination_frame)
# The H2O backend only accepts things that are quoted
if is_type(raw_frames, str): raw_frames = [raw_frames]
# temporary dictionary just to pass the following information to the parser: header, separator
kwargs = {"check_header": header, "source_frames": [quoted(frame_id) for frame_id in raw_frames],
"single_quotes": quotechar == "'"}
if separator:
kwargs["separator"] = ord(separator)
if escapechar:
kwargs["escapechar"] = ord(escapechar)
if custom_non_data_line_markers is not None:
kwargs["custom_non_data_line_markers"] = custom_non_data_line_markers
if partition_by is not None:
kwargs["partition_by"] = partition_by
j = api("POST /3/ParseSetup", data=kwargs)
if "warnings" in j and j["warnings"]:
for w in j["warnings"]:
warnings.warn(w)
# TODO: really should be url encoding...
if destination_frame:
j["destination_frame"] = destination_frame
parse_column_len = len(j["column_types"]) if skipped_columns is None else (len(j["column_types"])-len(skipped_columns))
tempColumnNames = j["column_names"] if j["column_names"] is not None else gen_header(j["number_columns"])
useType = [True]*len(tempColumnNames)
if skipped_columns is not None:
useType = [True]*len(tempColumnNames)
for ind in range(len(tempColumnNames)):
if ind in skipped_columns:
useType[ind]=False
if column_names is not None:
if not isinstance(column_names, list): raise ValueError("col_names should be a list")
if (skipped_columns is not None) and len(skipped_columns)>0:
if (len(column_names)) != parse_column_len:
raise ValueError(
"length of col_names should be equal to the number of columns parsed: %d vs %d"
% (len(column_names), parse_column_len))
else:
if len(column_names) != len(j["column_types"]): raise ValueError(
"length of col_names should be equal to the number of columns: %d vs %d"
% (len(column_names), len(j["column_types"])))
j["column_names"] = column_names
counter = 0
for ind in range(len(tempColumnNames)):
if useType[ind]:
tempColumnNames[ind]=column_names[counter]
counter=counter+1
if (column_types is not None): # keep the column types to include all columns
if isinstance(column_types, dict):
# overwrite dictionary to ordered list of column types. if user didn't specify column type for all names,
# use type provided by backend
if j["column_names"] is None: # no colnames discovered! (C1, C2, ...)
j["column_names"] = gen_header(j["number_columns"])
if not set(column_types.keys()).issubset(set(j["column_names"])): raise ValueError(
"names specified in col_types is not a subset of the column names")
idx = 0
column_types_list = []
for name in tempColumnNames: # column_names may have already been changed
if name in column_types:
column_types_list.append(column_types[name])
else:
column_types_list.append(j["column_types"][idx])
idx += 1
column_types = column_types_list
elif isinstance(column_types, list):
if len(column_types) != parse_column_len: raise ValueError(
"length of col_types should be equal to the number of parsed columns")
# need to expand it out to all columns, not just the parsed ones
column_types_list = j["column_types"]
counter = 0
for ind in range(len(j["column_types"])):
if useType[ind] and (column_types[counter]!=None):
column_types_list[ind]=column_types[counter]
counter=counter+1
column_types = column_types_list
else: # not dictionary or list
raise ValueError("col_types should be a list of types or a dictionary of column names to types")
j["column_types"] = column_types
if na_strings is not None:
if isinstance(na_strings, dict):
# overwrite dictionary to ordered list of lists of na_strings
if not j["column_names"]: raise ValueError("column names should be specified")
if not set(na_strings.keys()).issubset(set(j["column_names"])): raise ValueError(
"names specified in na_strings is not a subset of the column names")
j["na_strings"] = [[] for _ in range(len(j["column_names"]))]
for name, na in na_strings.items():
idx = j["column_names"].index(name)
if is_type(na, str): na = [na]
for n in na: j["na_strings"][idx].append(quoted(n))
elif is_type(na_strings, [[str]]):
if len(na_strings) != len(j["column_types"]):
raise ValueError("length of na_strings should be equal to the number of columns")
j["na_strings"] = [[quoted(na) for na in col] if col is not None else [] for col in na_strings]
elif isinstance(na_strings, list):
j["na_strings"] = [[quoted(na) for na in na_strings]] * len(j["column_types"])
else: # not a dictionary or list
raise ValueError(
"na_strings should be a list, a list of lists (one list per column), or a dictionary of column "
"names to strings which are to be interpreted as missing values")
if skipped_columns is not None:
if isinstance(skipped_columns, list):
j["skipped_columns"] = []
for colidx in skipped_columns:
if (colidx < 0): raise ValueError("skipped column index cannot be negative")
j["skipped_columns"].append(colidx)
# quote column names and column types also when not specified by user
if j["column_names"]: j["column_names"] = list(map(quoted, j["column_names"]))
j["column_types"] = list(map(quoted, j["column_types"]))
return j
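# Illustrative sketch of the two-step flow described above (the file path and the column override
# are assumptions for the example): guess the setup, adjust it, then hand it to ``parse_raw``.
def _sketch_parse_setup_flow():
    raw = import_file("/data/visible/to/cluster/example.csv", parse=False)  # raw file key(s), unparsed
    setup = parse_setup(raw, header=1, separator=",")
    setup["column_types"][0] = quoted("enum")  # override one guessed type; types are stored quoted
    return parse_raw(setup, id="example.hex", first_line_is_header=1)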
def parse_raw(setup, id=None, first_line_is_header=0):
"""
Parse dataset using the parse setup structure.
:param setup: Result of ``h2o.parse_setup()``
:param id: an id for the frame.
:param first_line_is_header: -1, 0, 1 if the first line is to be used as the header
:returns: an :class:`H2OFrame` object.
:examples:
>>> fraw = h2o.import_file(("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip"),
... parse=False)
>>> fhex = h2o.parse_raw(h2o.parse_setup(fraw),
... id='prostate.csv',
... first_line_is_header=0)
>>> fhex.summary()
"""
assert_is_type(setup, dict)
assert_is_type(id, str, None)
assert_is_type(first_line_is_header, -1, 0, 1)
check_frame_id(id)
if id:
setup["destination_frame"] = id
    if first_line_is_header not in (-1, 0, 1):
        raise ValueError("first_line_is_header should be -1, 0, or 1")
    setup["check_header"] = first_line_is_header
fr = H2OFrame()
fr._parse_raw(setup)
return fr
def assign(data, xid):
"""
(internal) Assign new id to the frame.
:param data: an H2OFrame whose id should be changed
:param xid: new id for the frame.
:returns: the passed frame.
:examples:
>>> old_name = "prostate.csv"
>>> new_name = "newProstate.csv"
>>> training_data = h2o.import_file(("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip"),
... destination_frame=old_name)
>>> temp=h2o.assign(training_data, new_name)
"""
assert_is_type(data, H2OFrame)
assert_is_type(xid, str)
assert_satisfies(xid, xid != data.frame_id)
check_frame_id(xid)
data._ex = ExprNode("assign", xid, data)._eval_driver(None)
data._ex._cache._id = xid
data._ex._children = None
return data
def deep_copy(data, xid):
"""
Create a deep clone of the frame ``data``.
:param data: an H2OFrame to be cloned
:param xid: (internal) id to be assigned to the new frame.
:returns: new :class:`H2OFrame` which is the clone of the passed frame.
:examples:
>>> training_data = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
>>> new_name = "new_frame"
>>> training_copy = h2o.deep_copy(training_data, new_name)
>>> training_copy
"""
assert_is_type(data, H2OFrame)
assert_is_type(xid, str)
assert_satisfies(xid, xid != data.frame_id)
check_frame_id(xid)
duplicate = data.apply(lambda x: x)
duplicate._ex = ExprNode("assign", xid, duplicate)._eval_driver(None)
duplicate._ex._cache._id = xid
duplicate._ex._children = None
return duplicate
def models():
"""
    Retrieve the IDs of all the models.
    :returns: IDs of all the models present in the cluster
:examples:
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> airlines["Year"]= airlines["Year"].asfactor()
>>> airlines["Month"]= airlines["Month"].asfactor()
>>> airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
>>> airlines["Cancelled"] = airlines["Cancelled"].asfactor()
>>> airlines['FlightNum'] = airlines['FlightNum'].asfactor()
    >>> response = "IsDepDelayed"
    >>> model1 = H2OGeneralizedLinearEstimator(family="binomial")
    >>> model1.train(y=response, training_frame=airlines)
    >>> model2 = H2OXGBoostEstimator()
    >>> model2.train(y=response, training_frame=airlines)
    >>> model_list = h2o.models()
"""
return [json["model_id"]["name"] for json in api("GET /3/Models")["models"]]
def get_model(model_id):
"""
Load a model from the server.
:param model_id: The model identification in H2O
:returns: Model object, a subclass of H2OEstimator
:examples:
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> airlines["Year"]= airlines["Year"].asfactor()
>>> airlines["Month"]= airlines["Month"].asfactor()
>>> airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
>>> airlines["Cancelled"] = airlines["Cancelled"].asfactor()
>>> airlines['FlightNum'] = airlines['FlightNum'].asfactor()
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> response = "IsDepDelayed"
>>> model = H2OGeneralizedLinearEstimator(family="binomial",
... alpha=0,
... Lambda=1e-5)
>>> model.train(x=predictors,
... y=response,
... training_frame=airlines)
>>> model2 = h2o.get_model(model.model_id)
"""
assert_is_type(model_id, str)
model_json = api("GET /3/Models/%s" % model_id)["models"][0]
algo = model_json["algo"]
# still some special handling for AutoEncoder: would be cleaner if we could get rid of this
if algo == 'deeplearning' and model_json["output"]["model_category"] == "AutoEncoder":
algo = 'autoencoder'
m = create_estimator(algo)
m._resolve_model(model_id, model_json)
return m
def get_grid(grid_id):
"""
Return the specified grid.
:param grid_id: The grid identification in h2o
:returns: an :class:`H2OGridSearch` instance.
:examples:
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> x = ["DayofMonth", "Month"]
>>> hyper_parameters = {'learn_rate':[0.1,0.2],
... 'max_depth':[2,3],
... 'ntrees':[5,10]}
>>> search_crit = {'strategy': "RandomDiscrete",
... 'max_models': 5,
... 'seed' : 1234,
... 'stopping_metric' : "AUTO",
... 'stopping_tolerance': 1e-2}
>>> air_grid = H2OGridSearch(H2OGradientBoostingEstimator,
... hyper_params=hyper_parameters,
... search_criteria=search_crit)
>>> air_grid.train(x=x,
... y="IsDepDelayed",
... training_frame=airlines,
... distribution="bernoulli")
>>> fetched_grid = h2o.get_grid(str(air_grid.grid_id))
>>> fetched_grid
"""
assert_is_type(grid_id, str)
grid_json = api("GET /99/Grids/%s" % grid_id)
models = [get_model(key["name"]) for key in grid_json["model_ids"]]
# get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)
first_model_json = api("GET /3/Models/%s" % grid_json["model_ids"][0]["name"])["models"][0]
gs = H2OGridSearch(None, {}, grid_id)
gs._resolve_grid(grid_id, grid_json, first_model_json)
gs.models = models
hyper_params = {param: set() for param in gs.hyper_names}
for param in gs.hyper_names:
for model in models:
if isinstance(model.full_parameters[param]["actual_value"], list):
hyper_params[param].add(model.full_parameters[param]["actual_value"][0])
else:
hyper_params[param].add(model.full_parameters[param]["actual_value"])
hyper_params = {str(param): list(vals) for param, vals in hyper_params.items()}
gs.hyper_params = hyper_params
gs.model = model.__class__()
return gs
def get_frame(frame_id, **kwargs):
"""
Obtain a handle to the frame in H2O with the frame_id key.
:param str frame_id: id of the frame to retrieve.
:returns: an :class:`H2OFrame` object
:examples:
>>> from h2o.frame import H2OFrame
>>> frame1 = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/titanic.csv")
>>> frame2 = h2o.get_frame(frame1.frame_id)
"""
assert_is_type(frame_id, str)
return H2OFrame.get_frame(frame_id, **kwargs)
def no_progress():
"""
Disable the progress bar from flushing to stdout.
The completed progress bar is printed when a job is complete so as to demarcate a log file.
:examples:
>>> h2o.no_progress()
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> x = ["DayofMonth", "Month"]
>>> model = H2OGeneralizedLinearEstimator(family="binomial",
... alpha=0,
... Lambda=1e-5)
>>> model.train(x=x, y="IsDepDelayed", training_frame=airlines)
"""
H2OJob.__PROGRESS_BAR__ = False
def show_progress():
"""Enable the progress bar (it is enabled by default).
:examples:
>>> h2o.no_progress()
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> x = ["DayofMonth", "Month"]
>>> model = H2OGeneralizedLinearEstimator(family="binomial",
... alpha=0,
... Lambda=1e-5)
>>> model.train(x=x, y="IsDepDelayed", training_frame=airlines)
>>> h2o.show_progress()
>>> model.train(x=x, y="IsDepDelayed", training_frame=airlines)
"""
H2OJob.__PROGRESS_BAR__ = True
def enable_expr_optimizations(flag):
"""Enable expression tree local optimizations.
:examples:
>>> h2o.enable_expr_optimizations(True)
"""
ExprNode.__ENABLE_EXPR_OPTIMIZATIONS__ = flag
def is_expr_optimizations_enabled():
"""
:examples:
>>> h2o.enable_expr_optimizations(True)
>>> h2o.is_expr_optimizations_enabled()
>>> h2o.enable_expr_optimizations(False)
>>> h2o.is_expr_optimizations_enabled()
"""
return ExprNode.__ENABLE_EXPR_OPTIMIZATIONS__
def log_and_echo(message=""):
"""
Log a message on the server-side logs.
This is helpful when running several pieces of work one after the other on a single H2O
cluster and you want to make a notation in the H2O server side log where one piece of
work ends and the next piece of work begins.
Sends a message to H2O for logging. Generally used for debugging purposes.
:param message: message to write to the log.
:examples:
>>> ret = h2o.log_and_echo("Testing h2o.log_and_echo")
"""
assert_is_type(message, str)
api("POST /3/LogAndEcho", data={"message": str(message)})
def remove(x, cascade=True):
"""
Remove object(s) from H2O.
:param x: H2OFrame, H2OEstimator, or string, or a list of those things: the object(s) or unique id(s)
pointing to the object(s) to be removed.
    :param cascade: boolean, if set to True (default), the object dependencies (e.g. submodels) are also removed.
:examples:
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> h2o.remove(airlines)
>>> airlines
# Should receive error: "This H2OFrame has been removed."
"""
item_type = U(str, Keyed)
assert_is_type(x, item_type, [item_type])
if not isinstance(x, list): x = [x]
for xi in x:
if isinstance(xi, H2OFrame):
if xi.key is None: return # Lazy frame, never evaluated, nothing in cluster
rapids("(rm {})".format(xi.key))
xi.detach()
elif isinstance(xi, Keyed):
api("DELETE /3/DKV/%s" % xi.key, data=dict(cascade=cascade))
xi.detach()
else:
# string may be a Frame key name part of a rapids session... need to call rm thru rapids here
try:
rapids("(rm {})".format(xi))
except:
api("DELETE /3/DKV/%s" % xi, data=dict(cascade=cascade))
def remove_all(retained=None):
"""
    Remove all objects from H2O, with the possibility to specify models and frames to retain.
    Retained keys must be keys of models and frames only. For retained models, training and validation frames are retained as well.
    Cross-validation models of a retained model are NOT retained automatically; those must be specified explicitly.
:param retained: Keys of models and frames to retain
:examples:
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> gbm = H2OGradientBoostingEstimator(ntrees = 1)
>>> gbm.train(x = ["Origin", "Dest"],
... y = "IsDepDelayed",
... training_frame=airlines)
>>> h2o.remove_all([airlines.frame_id,
... gbm.model_id])
"""
params = {"retained_keys": retained}
api(endpoint="DELETE /3/DKV", data=params)
def rapids(expr):
"""
Execute a Rapids expression.
:param expr: The rapids expression (ascii string).
:returns: The JSON response (as a python dictionary) of the Rapids execution.
:examples:
>>> rapidTime = h2o.rapids("(getTimeZone)")["string"]
>>> print(str(rapidTime))
"""
assert_is_type(expr, str)
return ExprNode.rapids(expr)
def ls():
"""List keys on an H2O Cluster.
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
>>> h2o.ls()
"""
return H2OFrame._expr(expr=ExprNode("ls")).as_data_frame(use_pandas=True)
def frame(frame_id):
"""
Retrieve metadata for an id that points to a Frame.
:param frame_id: the key of a Frame in H2O.
:returns: dict containing the frame meta-information.
:examples:
>>> training_data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> frame_summary = h2o.frame(training_data.frame_id)
>>> frame_summary
"""
assert_is_type(frame_id, str)
return api("GET /3/Frames/%s" % frame_id)
def frames():
"""
Retrieve all the Frames.
:returns: Meta information on the frames
:examples:
>>> arrestsH2O = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> h2o.frames()
"""
return api("GET /3/Frames")
def download_pojo(model, path="", get_jar=True, jar_name=""):
"""
Download the POJO for this model to the directory specified by path; if path is "", then dump to screen.
:param model: the model whose scoring POJO should be retrieved.
:param path: an absolute path to the directory where POJO should be saved.
:param get_jar: retrieve the h2o-genmodel.jar also (will be saved to the same folder ``path``).
:param jar_name: Custom name of genmodel jar.
:returns: location of the downloaded POJO file.
:examples:
>>> h2o_df = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
>>> h2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()
>>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
>>> binomial_fit = H2OGeneralizedLinearEstimator(family = "binomial")
>>> binomial_fit.train(y = "CAPSULE",
... x = ["AGE", "RACE", "PSA", "GLEASON"],
... training_frame = h2o_df)
>>> h2o.download_pojo(binomial_fit, path='', get_jar=False)
"""
assert_is_type(model, ModelBase)
assert_is_type(path, str)
assert_is_type(get_jar, bool)
if not model.have_pojo:
raise H2OValueError("Export to POJO not supported")
path = str(os.path.join(path, ''))
if path == "":
java_code = api("GET /3/Models.java/%s" % model.model_id)
print(java_code)
return None
else:
filename = api("GET /3/Models.java/%s" % model.model_id, save_to=path)
if get_jar:
if jar_name == "":
api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, "h2o-genmodel.jar"))
else:
api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, jar_name))
return filename
def download_csv(data, filename):
"""
Download an H2O data set to a CSV file on the local disk.
Warning: Files located on the H2O server may be very large! Make sure you have enough
hard drive space to accommodate the entire file.
:param data: an H2OFrame object to be downloaded.
:param filename: name for the CSV file where the data should be saved to.
:examples:
>>> iris = h2o.load_dataset("iris")
>>> h2o.download_csv(iris, "iris_delete.csv")
>>> iris2 = h2o.import_file("iris_delete.csv")
"""
assert_is_type(data, H2OFrame)
assert_is_type(filename, str)
return api("GET /3/DownloadDataset?frame_id=%s&hex_string=false" % data.frame_id, save_to=filename)
def download_all_logs(dirname=".", filename=None, container=None):
"""
Download H2O log files to disk.
:param dirname: a character string indicating the directory that the log file should be saved in.
    :param filename: a string indicating the name that the downloaded log archive file should be saved as.
Note that the default container format is .zip, so the file name must include the .zip extension.
:param container: a string indicating how to archive the logs, choice of "ZIP" (default) and "LOG"
ZIP: individual log files archived in a ZIP package
LOG: all log files will be concatenated together in one text file
:returns: path of logs written in a zip file.
:examples: The following code will save the zip file `'h2o_log.zip'` in a directory that is one down from where you are currently working into a directory called `your_directory_name`. (Please note that `your_directory_name` should be replaced with the name of the directory that you've created and that already exists.)
>>> h2o.download_all_logs(dirname='./your_directory_name/', filename = 'h2o_log.zip')
"""
assert_is_type(dirname, str)
assert_is_type(filename, str, None)
assert_is_type(container, "ZIP", "LOG", None)
type = "/%s" % container if container else ""
def save_to(resp):
path = os.path.join(dirname, filename if filename else h2oconn.save_to_detect(resp))
print("Writing H2O logs to " + path)
return path
return api("GET /3/Logs/download%s" % type, save_to=save_to)
def save_model(model, path="", force=False, export_cross_validation_predictions=False):
"""
Save an H2O Model object to disk. (Note that ensemble binary models can now be saved using this method.)
    The owner of the saved file is the user under which the H2O cluster was executed.
:param model: The model object to save.
:param path: a path to save the model at (hdfs, s3, local)
:param force: if True overwrite destination directory in case it exists, or throw exception if set to False.
:param export_cross_validation_predictions: logical, indicates whether the exported model
artifact should also include CV Holdout Frame predictions. Default is not to export the predictions.
:returns: the path of the saved model
:examples:
>>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
>>> h2o_df = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
>>> my_model = H2OGeneralizedLinearEstimator(family = "binomial")
>>> my_model.train(y = "CAPSULE",
... x = ["AGE", "RACE", "PSA", "GLEASON"],
... training_frame = h2o_df)
>>> h2o.save_model(my_model, path='', force=True)
"""
assert_is_type(model, ModelBase)
assert_is_type(path, str)
assert_is_type(force, bool)
assert_is_type(export_cross_validation_predictions, bool)
path = os.path.join(os.getcwd() if path == "" else path, model.model_id)
data = {"dir": path, "force": force, "export_cross_validation_predictions": export_cross_validation_predictions}
return api("GET /99/Models.bin/%s" % model.model_id, data=data)["dir"]
def download_model(model, path="", export_cross_validation_predictions=False):
"""
Download an H2O Model object to the machine this python session is currently connected to.
    The owner of the saved file is the user under which the Python session was executed.
:param model: The model object to download.
:param path: a path to the directory where the model should be saved.
:param export_cross_validation_predictions: logical, indicates whether the exported model
artifact should also include CV Holdout Frame predictions. Default is not to include the predictions.
:returns: the path of the downloaded model
:examples:
>>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
>>> h2o_df = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
>>> my_model = H2OGeneralizedLinearEstimator(family = "binomial")
>>> my_model.train(y = "CAPSULE",
... x = ["AGE", "RACE", "PSA", "GLEASON"],
... training_frame = h2o_df)
>>> h2o.download_model(my_model, path='')
"""
assert_is_type(model, ModelBase)
assert_is_type(path, str)
assert_is_type(export_cross_validation_predictions, bool)
path = os.path.join(os.getcwd() if path == "" else path, model.model_id)
return api("GET /3/Models.fetch.bin/%s" % model.model_id,
data={"export_cross_validation_predictions": export_cross_validation_predictions},
save_to=path)
def upload_model(path):
"""
Upload a binary model from the provided local path to the H2O cluster.
(H2O model can be saved in a binary form either by save_model() or by download_model() function.)
:param path: A path on the machine this python session is currently connected to, specifying the location of the model to upload.
:returns: a new :class:`H2OEstimator` object.
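:examples:
>>> # Minimal sketch (the path below is hypothetical); assumes the binary model
>>> # was previously written to the local filesystem with h2o.download_model():
>>> uploaded_model = h2o.upload_model("/tmp/my_model.bin")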
"""
response = api("POST /3/PostFile.bin", filename=path)
frame_key = response["destination_frame"]
res = api("POST /99/Models.upload.bin/%s" % "", data={"dir": frame_key})
return get_model(res["models"][0]["model_id"]["name"])
def load_model(path):
"""
Load a saved H2O model from disk. (Note that ensemble binary models can now be loaded using this method.)
:param path: the full path of the H2O Model to be imported.
:returns: an :class:`H2OEstimator` object
:examples:
>>> training_data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> response = "IsDepDelayed"
>>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
>>> model = H2OGeneralizedLinearEstimator(family="binomial",
...                                       alpha=0,
...                                       Lambda=1e-5)
>>> model.train(x=predictors,
... y=response,
... training_frame=training_data)
>>> model_path = h2o.save_model(model, path='', force=True)
>>> h2o.load_model(model_path)
"""
assert_is_type(path, str)
res = api("POST /99/Models.bin/%s" % "", data={"dir": path})
return get_model(res["models"][0]["model_id"]["name"])
def export_file(frame, path, force=False, sep=",", compression=None, parts=1, header=True, quote_header=True, parallel=False):
"""
Export a given H2OFrame to a path on the machine this python session is currently connected to.
:param frame: the Frame to save to disk.
:param path: the path to the save point on disk.
:param force: if True, overwrite any preexisting file with the same path.
:param sep: field delimiter for the output file.
:param compression: how to compress the exported dataset (default none; gzip, bzip2 and snappy available)
:param parts: enables export to multiple 'part' files instead of just a single file.
Convenient for large datasets that take too long to store in a single file.
Use parts=-1 to instruct H2O to determine the optimal number of part files or
specify your desired maximum number of part files. When exporting to multiple files,
the path must point to a directory, and that directory must be empty.
Default is ``parts = 1``, which is to export to a single file.
:param header: if True, write out column names in the header line.
:param quote_header: if True, quote column names in the header.
:param parallel: use a parallel export to a single file (doesn't apply when num_parts != 1,
might create temporary files in the destination directory).
:examples:
>>> h2o_df = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv")
>>> h2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()
>>> rand_vec = h2o_df.runif(1234)
>>> train = h2o_df[rand_vec <= 0.8]
>>> valid = h2o_df[(rand_vec > 0.8) & (rand_vec <= 0.9)]
>>> test = h2o_df[rand_vec > 0.9]
>>> binomial_fit = H2OGeneralizedLinearEstimator(family = "binomial")
>>> binomial_fit.train(y = "CAPSULE",
... x = ["AGE", "RACE", "PSA", "GLEASON"],
... training_frame = train, validation_frame = valid)
>>> pred = binomial_fit.predict(test)
>>> h2o.export_file(pred, "/tmp/pred.csv", force = True)
"""
assert_is_type(frame, H2OFrame)
assert_is_type(path, str)
assert_is_type(sep, I(str, lambda s: len(s) == 1))
assert_is_type(force, bool)
assert_is_type(parts, int)
assert_is_type(compression, str, None)
assert_is_type(header, bool)
assert_is_type(quote_header, bool)
assert_is_type(parallel, bool)
H2OJob(api("POST /3/Frames/%s/export" % (frame.frame_id),
data={"path": path, "num_parts": parts, "force": force,
"compression": compression, "separator": ord(sep),
"header": header, "quote_header": quote_header, "parallel": parallel}), "Export File").poll()
def load_frame(frame_id, path, force=True):
"""
Load frame previously stored in H2O's native format.
This will load a data frame from file-system location. Stored data can be loaded only with a cluster of the same
size and the same version as the one that wrote the data. The provided directory must be accessible from all nodes
(HDFS, NFS). Provided frame_id must be the same as the one used when writing the data.
:param frame_id: the frame ID of the original frame
:param path: a filesystem location where to look for frame data
:param force: overwrite an already existing frame (defaults to true)
:returns: A Frame object.
:examples:
>>> iris = h2o.load_frame("iris_weather.hex", "hdfs://namenode/h2o_data")
"""
H2OJob(api(
"POST /3/Frames/load",
data={"frame_id": frame_id, "dir": path, "force": force}
), "Load frame data").poll()
return get_frame(frame_id)
def cluster():
"""Return :class:`H2OCluster` object describing the backend H2O cluster.
:examples:
>>> import h2o
>>> h2o.init()
>>> h2o.cluster()
"""
return h2oconn.cluster if h2oconn else None
def create_frame(frame_id=None, rows=10000, cols=10, randomize=True,
real_fraction=None, categorical_fraction=None, integer_fraction=None,
binary_fraction=None, time_fraction=None, string_fraction=None,
value=0, real_range=100, factors=100, integer_range=100,
binary_ones_fraction=0.02, missing_fraction=0.01,
has_response=False, response_factors=2, positive_response=False,
seed=None, seed_for_column_types=None):
"""
Create a new frame with random data.
Creates a data frame in H2O with real-valued, categorical, integer, and binary columns specified by the user.
:param frame_id: the destination key. If empty, this will be auto-generated.
:param rows: the number of rows of data to generate.
:param cols: the number of columns of data to generate. Excludes the response column if has_response is True.
:param randomize: If True, data values will be randomly generated. This must be True if either
categorical_fraction or integer_fraction is non-zero.
:param value: if randomize is False, then all real-valued entries will be set to this value.
:param real_range: the range of randomly generated real values.
:param real_fraction: the fraction of columns that are real-valued.
:param categorical_fraction: the fraction of total columns that are categorical.
:param factors: the number of (unique) factor levels in each categorical column.
:param integer_fraction: the fraction of total columns that are integer-valued.
:param integer_range: the range of randomly generated integer values.
:param binary_fraction: the fraction of total columns that are binary-valued.
:param binary_ones_fraction: the fraction of values in a binary column that are set to 1.
:param time_fraction: the fraction of randomly created date/time columns.
:param string_fraction: the fraction of randomly created string columns.
:param missing_fraction: the fraction of total entries in the data frame that are set to NA.
:param has_response: A logical value indicating whether an additional response column should be prepended to the
final H2O data frame. If set to True, the total number of columns will be ``cols + 1``.
:param response_factors: if has_response is True, then this variable controls the type of the "response" column:
setting response_factors to 1 will generate real-valued response, any value greater or equal than 2 will
create categorical response with that many categories.
:param positive_response: when response variable is present and of real type, this will control whether it
contains positive values only, or both positive and negative.
:param seed: a seed used to generate random values when ``randomize`` is True.
:param seed_for_column_types: a seed used to generate random column types when ``randomize`` is True.
:returns: an :class:`H2OFrame` object
:examples:
>>> import random
>>> dataset_params = {}
>>> dataset_params['rows'] = random.sample(list(range(50,150)),1)[0]
>>> dataset_params['cols'] = random.sample(list(range(3,6)),1)[0]
>>> dataset_params['categorical_fraction'] = round(random.random(),1)
>>> left_over = (1 - dataset_params['categorical_fraction'])
>>> dataset_params['integer_fraction'] = round(left_over - round(random.uniform(0,left_over),1),1)
>>> if dataset_params['integer_fraction'] + dataset_params['categorical_fraction'] == 1:
...     if dataset_params['integer_fraction'] > dataset_params['categorical_fraction']:
...         dataset_params['integer_fraction'] = dataset_params['integer_fraction'] - 0.1
...     else:
...         dataset_params['categorical_fraction'] = dataset_params['categorical_fraction'] - 0.1
>>> dataset_params['missing_fraction'] = random.uniform(0,0.5)
>>> dataset_params['has_response'] = False
>>> dataset_params['randomize'] = True
>>> dataset_params['factors'] = random.randint(2,5)
>>> print("Dataset parameters: {0}".format(dataset_params))
>>> distribution = random.sample(['bernoulli','multinomial',
... 'gaussian','poisson','gamma'], 1)[0]
>>> if distribution == 'bernoulli':
...     dataset_params['response_factors'] = 2
... elif distribution == 'gaussian':
...     dataset_params['response_factors'] = 1
... elif distribution == 'multinomial':
...     dataset_params['response_factors'] = random.randint(3,5)
... else:
...     dataset_params['has_response'] = False
>>> print("Distribution: {0}".format(distribution))
>>> train = h2o.create_frame(**dataset_params)
"""
t_fraction = U(None, BoundNumeric(0, 1))
assert_is_type(frame_id, str, None)
assert_is_type(rows, BoundInt(1))
assert_is_type(cols, BoundInt(1))
assert_is_type(randomize, bool)
assert_is_type(value, numeric)
assert_is_type(real_range, BoundNumeric(0))
assert_is_type(real_fraction, t_fraction)
assert_is_type(categorical_fraction, t_fraction)
assert_is_type(integer_fraction, t_fraction)
assert_is_type(binary_fraction, t_fraction)
assert_is_type(time_fraction, t_fraction)
assert_is_type(string_fraction, t_fraction)
assert_is_type(missing_fraction, t_fraction)
assert_is_type(binary_ones_fraction, t_fraction)
assert_is_type(factors, BoundInt(1))
assert_is_type(integer_range, BoundInt(1))
assert_is_type(has_response, bool)
assert_is_type(response_factors, None, BoundInt(1))
assert_is_type(positive_response, bool)
assert_is_type(seed, int, None)
assert_is_type(seed_for_column_types, int, None)
check_frame_id(frame_id)
if randomize and value:
raise H2OValueError("Cannot set data to a `value` if `randomize` is true")
if (categorical_fraction or integer_fraction) and not randomize:
raise H2OValueError("`randomize` should be True when either categorical or integer columns are used.")
# The total column fraction that the user has specified explicitly. This sum should not exceed 1. We will respect
# all explicitly set fractions, and will auto-select the remaining fractions.
frcs = [real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction]
wgts = [0.5, 0.2, 0.2, 0.1, 0.0, 0.0]
sum_explicit_fractions = sum(0 if f is None else f for f in frcs)
count_explicit_fractions = sum(0 if f is None else 1 for f in frcs)
remainder = 1 - sum_explicit_fractions
if sum_explicit_fractions >= 1 + 1e-10:
raise H2OValueError("Fractions of binary, integer, categorical, time and string columns should add up "
"to a number less than 1.")
elif sum_explicit_fractions >= 1 - 1e-10:
# The fractions already add up to almost 1. No need to do anything (the server will absorb the tiny
# remainder into the real_fraction column).
pass
else:
# sum_explicit_fractions < 1 => distribute the remainder among the columns that were not set explicitly
if count_explicit_fractions == 6:
raise H2OValueError("Fraction of binary, integer, categorical, time and string columns add up to a "
"number less than 1.")
# Each column type receives a certain part (proportional to column's "weight") of the remaining fraction.
sum_implicit_weights = sum(wgts[i] if frcs[i] is None else 0 for i in range(6))
for i, f in enumerate(frcs):
if frcs[i] is not None: continue
if sum_implicit_weights == 0:
frcs[i] = remainder
else:
frcs[i] = remainder * wgts[i] / sum_implicit_weights
remainder -= frcs[i]
sum_implicit_weights -= wgts[i]
for i, f in enumerate(frcs):
if f is None:
frcs[i] = 0
real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction = frcs
parms = {"dest": frame_id if frame_id else py_tmp_key(append=h2oconn.session_id),
"rows": rows,
"cols": cols,
"randomize": randomize,
"categorical_fraction": categorical_fraction,
"integer_fraction": integer_fraction,
"binary_fraction": binary_fraction,
"time_fraction": time_fraction,
"string_fraction": string_fraction,
# "real_fraction" is not provided, the backend computes it as 1 - sum(5 other fractions)
"value": value,
"real_range": real_range,
"factors": factors,
"integer_range": integer_range,
"binary_ones_fraction": binary_ones_fraction,
"missing_fraction": missing_fraction,
"has_response": has_response,
"response_factors": response_factors,
"positive_response": positive_response,
"seed": -1 if seed is None else seed,
"seed_for_column_types": -1 if seed_for_column_types is None else seed_for_column_types,
}
H2OJob(api("POST /3/CreateFrame", data=parms), "Create Frame").poll()
return get_frame(parms["dest"])
def interaction(data, factors, pairwise, max_factors, min_occurrence, destination_frame=None):
"""
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
:param data: the H2OFrame that holds the target categorical columns.
:param factors: factor columns (either indices or column names).
:param pairwise: If True, create pairwise interactions between factors (otherwise create one
higher-order interaction). Only applicable if there are 3 or more factors.
:param max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra
catch-all factor will be made).
:param min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms
:param destination_frame: a string indicating the destination key. If empty, this will be auto-generated by H2O.
:returns: :class:`H2OFrame`
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
>>> iris = iris.cbind(iris[4] == "Iris-setosa")
>>> iris[5] = iris[5].asfactor()
>>> iris.set_name(5,"C6")
>>> iris = iris.cbind(iris[4] == "Iris-virginica")
>>> iris[6] = iris[6].asfactor()
>>> iris.set_name(6, name="C7")
>>> two_way_interactions = h2o.interaction(iris,
... factors=[4,5,6],
... pairwise=True,
... max_factors=10000,
... min_occurrence=1)
>>> from h2o.utils.typechecks import assert_is_type
>>> assert_is_type(two_way_interactions, H2OFrame)
>>> levels1 = two_way_interactions.levels()[0]
>>> levels2 = two_way_interactions.levels()[1]
>>> levels3 = two_way_interactions.levels()[2]
>>> two_way_interactions
"""
assert_is_type(data, H2OFrame)
assert_is_type(factors, [str, int])
assert_is_type(pairwise, bool)
assert_is_type(max_factors, int)
assert_is_type(min_occurrence, int)
assert_is_type(destination_frame, str, None)
factors = [data.names[n] if is_type(n, int) else n for n in factors]
parms = {"dest": py_tmp_key(append=h2oconn.session_id) if destination_frame is None else destination_frame,
"source_frame": data.frame_id,
"factor_columns": [quoted(f) for f in factors],
"pairwise": pairwise,
"max_factors": max_factors,
"min_occurrence": min_occurrence,
}
H2OJob(api("POST /3/Interaction", data=parms), "Interactions").poll()
return get_frame(parms["dest"])
def as_list(data, use_pandas=True, header=True):
"""
Convert an H2O data object into a python-specific object.
WARNING! This will pull all data local!
If Pandas is available (and use_pandas is True), then pandas will be used to parse the
data frame. Otherwise, a list-of-lists populated by character data will be returned (so
the types of data will all be str).
:param data: an H2O data object.
:param use_pandas: If True, try to use pandas for reading in the data.
:param header: If True, return column names as first element in list
:returns: List of lists (Rows x Columns).
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
>>> from h2o.utils.typechecks import assert_is_type
>>> res1 = h2o.as_list(iris, use_pandas=False)
>>> assert_is_type(res1, list)
>>> res1 = list(zip(*res1))
>>> assert abs(float(res1[0][9]) - 4.4) < 1e-10 and abs(float(res1[1][9]) - 2.9) < 1e-10 and \
... abs(float(res1[2][9]) - 1.4) < 1e-10, "incorrect values"
>>> res1
"""
assert_is_type(data, H2OFrame)
assert_is_type(use_pandas, bool)
assert_is_type(header, bool)
return H2OFrame.as_data_frame(data, use_pandas=use_pandas, header=header)
def demo(funcname, interactive=True, echo=True, test=False):
"""
H2O built-in demo facility.
:param funcname: A string that identifies the h2o python function to demonstrate.
:param interactive: If True, the user will be prompted to continue the demonstration after every segment.
:param echo: If True, the python commands that are executed will be displayed.
:param test: If True, `h2o.init()` will not be called (used for pyunit testing).
:example:
>>> import h2o
>>> h2o.demo("gbm")
"""
import h2o.demos as h2odemo
assert_is_type(funcname, str)
assert_is_type(interactive, bool)
assert_is_type(echo, bool)
assert_is_type(test, bool)
demo_function = getattr(h2odemo, funcname, None)
if demo_function and type(demo_function) is type(demo):
demo_function(interactive, echo, test)
else:
print("Demo for %s is not available." % funcname)
def load_dataset(relative_path):
"""Imports a data file within the 'h2o_data' folder.
:examples:
>>> fr = h2o.load_dataset("iris")
"""
assert_is_type(relative_path, str)
h2o_dir = os.path.split(__file__)[0]
for possible_file in [os.path.join(h2o_dir, relative_path),
os.path.join(h2o_dir, "h2o_data", relative_path),
os.path.join(h2o_dir, "h2o_data", relative_path + ".csv")]:
if os.path.exists(possible_file):
return upload_file(possible_file)
# File not found -- raise an error!
raise H2OValueError("Data file %s cannot be found" % relative_path)
def make_metrics(predicted, actual, domain=None, distribution=None, weights=None, auc_type="NONE"):
"""
Create Model Metrics from predicted and actual values in H2O.
:param H2OFrame predicted: an H2OFrame containing predictions.
:param H2OFrame actual: an H2OFrame containing actual values.
:param domain: list of response factors for classification.
:param distribution: distribution for regression.
:param H2OFrame weights: an H2OFrame containing observation weights (optional).
:param auc_type: For multinomial classification you have to specify which type of aggregated AUC/AUCPR
will be used to calculate this metric. Possibilities are MACRO_OVO, MACRO_OVR, WEIGHTED_OVO, WEIGHTED_OVR,
NONE and AUTO (OVO = One vs. One, OVR = One vs. Rest). Default is "NONE" (AUC and AUCPR are not calculated).
:examples:
>>> fr = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
>>> fr["CAPSULE"] = fr["CAPSULE"].asfactor()
>>> fr["RACE"] = fr["RACE"].asfactor()
>>> response = "AGE"
>>> predictors = list(set(fr.names) - {"ID", response})
>>> for distr in ["gaussian", "poisson", "laplace", "gamma"]:
... print("distribution: %s" % distr)
... model = H2OGradientBoostingEstimator(distribution=distr,
... ntrees=2,
... max_depth=3,
... min_rows=1,
... learn_rate=0.1,
... nbins=20)
... model.train(x=predictors,
... y=response,
... training_frame=fr)
... predicted = h2o.assign(model.predict(fr), "pred")
... actual = fr[response]
... m0 = model.model_performance(train=True)
... m1 = h2o.make_metrics(predicted, actual, distribution=distr)
... m2 = h2o.make_metrics(predicted, actual)
>>> print(m0)
>>> print(m1)
>>> print(m2)
"""
assert_is_type(predicted, H2OFrame)
assert_is_type(actual, H2OFrame)
assert_is_type(weights, H2OFrame, None)
assert actual.ncol == 1, "`actual` frame should have exactly 1 column"
assert_is_type(distribution, str, None)
assert_satisfies(actual.ncol, actual.ncol == 1)
assert_is_type(auc_type, str)
allowed_auc_types = ["MACRO_OVO", "MACRO_OVR", "WEIGHTED_OVO", "WEIGHTED_OVR", "AUTO", "NONE"]
assert auc_type in allowed_auc_types, "auc_type should be "+(" ".join([str(type) for type in allowed_auc_types]))
if domain is None and any(actual.isfactor()):
domain = actual.levels()[0]
params = {"domain": domain, "distribution": distribution}
if weights is not None:
params["weights_frame"] = weights.frame_id
params["auc_type"] = auc_type
res = api("POST /3/ModelMetrics/predictions_frame/%s/actuals_frame/%s" % (predicted.frame_id, actual.frame_id),
data=params)
return res["model_metrics"]
def flow():
"""
Open H2O Flow in your browser.
:examples:
>>> import h2o
>>> h2o.init()
>>> h2o.flow()
"""
webbrowser.open(connection().base_url, new = 1)
def _put_key(file_path, dest_key=None, overwrite=True):
"""
Upload the given file into DKV and save it under the given key as a raw object.
:param dest_key: name of destination key in DKV
:param file_path: path to file to upload
:return: key name if object was uploaded successfully
"""
ret = api("POST /3/PutKey?destination_key={}&overwrite={}".format(dest_key if dest_key else '', overwrite),
filename=file_path)
return ret["destination_key"]
def _create_zip_file(dest_filename, *content_list):
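"""Create a zip archive named ``dest_filename`` from the given (filename, file_content) pairs and return its path."""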
from .utils.shared_utils import InMemoryZipArch
with InMemoryZipArch(dest_filename) as zip_arch:
for filename, file_content in content_list:
zip_arch.append(filename, file_content)
return dest_filename
def _inspect_methods_separately(obj):
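"""Reconstruct a class definition by concatenating the source of its methods (used when the source of the whole class is unavailable, e.g. when it was defined in an interactive shell)."""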
import inspect
class_def = "class {}:\n".format(obj.__name__)
for name, member in inspect.getmembers(obj):
if inspect.ismethod(member):
class_def += inspect.getsource(member)
elif inspect.isfunction(member):
class_def += inspect.getsource(member)
return class_def
def _default_source_provider(obj):
import inspect
# First try to get source code via inspect
try:
return ' '.join(inspect.getsourcelines(obj)[0])
except (OSError, TypeError, IOError):
# It seems like we are in interactive shell and
# we do not have access to class source code directly
# At this point we can:
# (1) get IPython history and find class definition, or
# (2) compose body of class from methods, since it is still possible to get
# method body
return _inspect_methods_separately(obj)
def _default_custom_distribution_source_provider(obj):
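"""Return source code for a custom distribution class: generic custom distributions are reconstructed method by method, anything else falls back to the default source provider."""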
from h2o.utils.distributions import CustomDistributionGeneric
if CustomDistributionGeneric in obj.mro():
return _inspect_methods_separately(obj)
else:
return _default_source_provider(obj)
def upload_custom_metric(func, func_file="metrics.py", func_name=None, class_name=None, source_provider=None):
"""
Upload given metrics function into H2O cluster.
The metric can be given in different representations:
- class: needs to implement map(pred, act, weight, offset, model), reduce(l, r) and metric(l) methods
- string: the same as in the class case, but the class is given as a string
:param func: metric representation: string, class
:param func_file: internal name of file to save given metrics representation
:param func_name: name for h2o key under which the given metric is saved
:param class_name: name of class wrapping the metrics function (when supplied as string)
:param source_provider: a function which provides a source code for given function
:return: reference to uploaded metrics function
:examples:
>>> class CustomMaeFunc:
>>> def map(self, pred, act, w, o, model):
>>> return [abs(act[0] - pred[0]), 1]
>>>
>>> def reduce(self, l, r):
>>> return [l[0] + r[0], l[1] + r[1]]
>>>
>>> def metric(self, l):
>>> return l[0] / l[1]
>>>
>>> custom_func_str = '''class CustomMaeFunc:
>>> def map(self, pred, act, w, o, model):
>>> return [abs(act[0] - pred[0]), 1]
>>>
>>> def reduce(self, l, r):
>>> return [l[0] + r[0], l[1] + r[1]]
>>>
>>> def metric(self, l):
>>> return l[0] / l[1]'''
>>>
>>>
>>> h2o.upload_custom_metric(custom_func_str, class_name="CustomMaeFunc", func_name="mae")
"""
import tempfile
import inspect
# Use default source provider
if not source_provider:
source_provider = _default_source_provider
# The template wraps given metrics representation
_CFUNC_CODE_TEMPLATE = """# Generated code
import water.udf.CMetricFunc as MetricFunc
# User given metric function as a class implementing
# 3 methods defined by interface CMetricFunc
{}
# Generated user metric which satisfies the interface
# of Java MetricFunc
class {}Wrapper({}, MetricFunc, object):
pass
"""
assert_satisfies(func, inspect.isclass(func) or isinstance(func, str),
"The argument func needs to be string or class !")
assert_satisfies(func_file, func_file is not None,
"The argument func_file is missing!")
assert_satisfies(func_file, func_file.endswith('.py'),
"The argument func_file needs to end with '.py'")
code = None
derived_func_name = None
module_name = func_file[:-3]
if isinstance(func, str):
assert_satisfies(class_name, class_name is not None,
"The argument class_name is missing! " +
"It needs to reference the class in given string!")
code = _CFUNC_CODE_TEMPLATE.format(func, class_name, class_name)
derived_func_name = "metrics_{}".format(class_name)
class_name = "{}.{}Wrapper".format(module_name, class_name)
else:
assert_satisfies(func, inspect.isclass(func), "The parameter `func` should be str or class")
for method in ['map', 'reduce', 'metric']:
assert_satisfies(func, method in func.__dict__, "The class `func` needs to define method `{}`".format(method))
assert_satisfies(class_name, class_name is None,
"If class is specified then class_name parameter needs to be None")
class_name = "{}.{}Wrapper".format(module_name, func.__name__)
derived_func_name = "metrics_{}".format(func.__name__)
code = _CFUNC_CODE_TEMPLATE.format(source_provider(func), func.__name__, func.__name__)
# If the func name is not given, use whatever we can derive from the given definition
if not func_name:
func_name = derived_func_name
# Save the generated code into a jar file
tmpdir = tempfile.mkdtemp(prefix="h2o-func")
func_arch_file = _create_zip_file("{}/func.jar".format(tmpdir), (func_file, code))
# Upload into K/V
dest_key = _put_key(func_arch_file, dest_key=func_name)
# Reference
return "python:{}={}".format(dest_key, class_name)
def upload_custom_distribution(func, func_file="distributions.py", func_name=None, class_name=None, source_provider=None):
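"""
Upload given custom distribution function into H2O cluster.
The distribution can have different representations:
- class: needs to implement link(), init(), gamma() and gradient() methods
- string: the same as in the class case, but the class is given as a string
:param func: distribution representation: string, class
:param func_file: internal name of file to save given distribution representation
:param func_name: name for h2o key under which the given distribution is saved
:param class_name: name of class wrapping the distribution function (when supplied as string)
:param source_provider: a function which provides a source code for given function
:return: reference to uploaded distribution function
"""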
import tempfile
import inspect
# Use default source provider
if not source_provider:
source_provider = _default_custom_distribution_source_provider
# The template wraps given metrics representation
_CFUNC_CODE_TEMPLATE = """# Generated code
import water.udf.CDistributionFunc as DistributionFunc
# User given metric function as a class implementing
# 4 methods defined by interface CDistributionFunc
{}
# Generated user distribution which satisfies the interface
# of Java DistributionFunc
class {}Wrapper({}, DistributionFunc, object):
pass
"""
assert_satisfies(func, inspect.isclass(func) or isinstance(func, str),
"The argument func needs to be string or class !")
assert_satisfies(func_file, func_file is not None,
"The argument func_file is missing!")
assert_satisfies(func_file, func_file.endswith('.py'),
"The argument func_file needs to end with '.py'")
code = None
derived_func_name = None
module_name = func_file[:-3]
if isinstance(func, str):
assert_satisfies(class_name, class_name is not None,
"The argument class_name is missing! " +
"It needs to reference the class in given string!")
code = _CFUNC_CODE_TEMPLATE.format(func, class_name, class_name)
derived_func_name = "distributions_{}".format(class_name)
class_name = "{}.{}Wrapper".format(module_name, class_name)
else:
assert_satisfies(func, inspect.isclass(func), "The parameter `func` should be str or class")
for method in ['link', 'init', 'gamma', 'gradient']:
assert_satisfies(func, method in dir(func), "The class `func` needs to define method `{}`".format(method))
assert_satisfies(class_name, class_name is None,
"If class is specified then class_name parameter needs to be None")
class_name = "{}.{}Wrapper".format(module_name, func.__name__)
derived_func_name = "distributions_{}".format(func.__name__)
code = _CFUNC_CODE_TEMPLATE.format(source_provider(func), func.__name__, func.__name__)
# If the func name is not given, use whatever we can derive from the given definition
if not func_name:
func_name = derived_func_name
# Save the generated code into a jar file
tmpdir = tempfile.mkdtemp(prefix="h2o-func")
func_arch_file = _create_zip_file("{}/func.jar".format(tmpdir), (func_file, code))
# Upload into K/V
dest_key = _put_key(func_arch_file, dest_key=func_name)
# Reference
return "python:{}={}".format(dest_key, class_name)
def import_mojo(mojo_path):
"""
Imports an existing MOJO model as an H2O model.
:param mojo_path: Path to the MOJO archive on the H2O's filesystem
:return: An H2OGenericEstimator instance embedding given MOJO
:examples:
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> model = H2OGradientBoostingEstimator(ntrees = 1)
>>> model.train(x = ["Origin", "Dest"],
... y = "IsDepDelayed",
... training_frame=airlines)
>>> original_model_filename = tempfile.mkdtemp()
>>> original_model_filename = model.download_mojo(original_model_filename)
>>> mojo_model = h2o.import_mojo(original_model_filename)
"""
if mojo_path is None:
raise TypeError("MOJO path may not be None")
mojo_estimator = H2OGenericEstimator.from_file(mojo_path)
print(mojo_estimator)
return mojo_estimator
def upload_mojo(mojo_path):
"""
Uploads an existing MOJO model from local filesystem into H2O and imports it as an H2O Generic Model.
:param mojo_path: Path to the MOJO archive on the user's local filesystem
:return: An H2OGenericEstimator instance embedding given MOJO
:examples:
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> model = H2OGradientBoostingEstimator(ntrees = 1)
>>> model.train(x = ["Origin", "Dest"],
... y = "IsDepDelayed",
... training_frame=airlines)
>>> original_model_filename = tempfile.mkdtemp()
>>> original_model_filename = model.download_mojo(original_model_filename)
>>> mojo_model = h2o.upload_mojo(original_model_filename)
"""
response = api("POST /3/PostFile", filename=mojo_path)
frame_key = response["destination_frame"]
mojo_estimator = H2OGenericEstimator(model_key = get_frame(frame_key))
mojo_estimator.train()
print(mojo_estimator)
return mojo_estimator
def print_mojo(mojo_path, format="json", tree_index=None):
"""
Generates string representation of an existing MOJO model.
:param mojo_path: Path to the MOJO archive on the user's local filesystem
:param format: Output format. Possible values: json (default), dot, png
:param tree_index: Index of tree to print
:return: A string representation of the MOJO for text output formats,
or a path to a directory with the rendered images for image output formats
(or a path to a single file if only one tree is output)
:example:
>>> import json
>>> from h2o.estimators.gbm import H2OGradientBoostingEstimator
>>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv")
>>> prostate["CAPSULE"] = prostate["CAPSULE"].asfactor()
>>> gbm_h2o = H2OGradientBoostingEstimator(ntrees = 5,
... learn_rate = 0.1,
... max_depth = 4,
... min_rows = 10)
>>> gbm_h2o.train(x = list(range(1,prostate.ncol)),
... y = "CAPSULE",
... training_frame = prostate)
>>> mojo_path = gbm_h2o.download_mojo()
>>> mojo_str = h2o.print_mojo(mojo_path)
>>> mojo_dict = json.loads(mojo_str)
"""
assert_is_type(mojo_path, str)
assert_is_type(format, str, None)
assert_satisfies(format, format in [None, "json", "dot", "png"])
assert_is_type(tree_index, int, None)
ls = H2OLocalServer()
jar = ls._find_jar()
java = ls._find_java()
if format is None:
format = "json"
is_image = format == "png"
output_file = tempfile.mkstemp(prefix="mojo_output")[1]
cmd = [java, "-cp", jar, "hex.genmodel.tools.PrintMojo", "--input", mojo_path, "--format", format,
"--output", output_file]
if tree_index is not None:
cmd += ["--tree", str(tree_index)]
try:
return_code = subprocess.call(cmd)
if is_image:
output = output_file
else:
with open(output_file, "r") as f:
output = f.read()
os.unlink(output_file)
except OSError as e:
traceback = getattr(e, "child_traceback", None)
raise H2OError("Unable to print MOJO: %s" % e, traceback)
if return_code == 0:
return output
else:
raise H2OError("Unable to print MOJO: %s" % output)
def estimate_cluster_mem(ncols, nrows, num_cols = 0, string_cols = 0, cat_cols = 0, time_cols = 0, uuid_cols = 0):
"""
Computes an estimate for cluster memory usage in GB.
Number of columns and number of rows are required. For a better estimate you can provide a counts of different
types of columns in the dataset.
:param ncols: total number of columns in a dataset. A required parameter; integer, can't be negative.
:param nrows: total number of rows in a dataset. A required parameter; integer, can't be negative.
:param num_cols: number of numeric columns in a dataset. Integer, can't be negative.
:param string_cols: number of string columns in a dataset. Integer, can't be negative.
:param cat_cols: number of categorical columns in a dataset. Integer, can't be negative.
:param time_cols: number of time columns in a dataset. Integer, can't be negative.
:param uuid_cols: number of uuid columns in a dataset. Integer, can't be negative.
:return: A memory estimate in GB.
:example:
>>> from h2o import estimate_cluster_mem
>>> ### I will load a parquet file with 18 columns and 2 million rows
>>> estimate_cluster_mem(18, 2000000)
>>> ### I will load another parquet file with 16 columns and 2 million rows; I ask for a more precise estimate
>>> ### because I know 12 of the 16 columns are categorical and 1 of the 16 columns consists of uuids.
>>> estimate_cluster_mem(16, 2000000, cat_cols=12, uuid_cols=1)
>>> ### I will load a parquet file with 8 columns and 31 million rows; I ask for a more precise estimate
>>> ### because I know 4 of the 8 columns are categorical and 4 of the 8 columns consist of numbers.
>>> estimate_cluster_mem(ncols=8, nrows=31000000, cat_cols=4, num_cols=4)
"""
import math
if (ncols < 0):
raise ValueError("ncols can't be a negative number")
if (nrows < 0):
raise ValueError("nrows can't be a negative number")
if (num_cols < 0):
raise ValueError("num_cols can't be a negative number")
if (string_cols < 0):
raise ValueError("string_cols can't be a negative number")
if (cat_cols < 0):
raise ValueError("cat_cols can't be a negative number")
if (time_cols < 0):
raise ValueError("time_cols can't be a negative number")
if (uuid_cols < 0):
raise ValueError("uuid_cols can't be a negative number")
BASE_MEM_REQUIREMENT_MB = 32
SAFETY_FACTOR = 4
BYTES_IN_MB = 1024 * 1024
BYTES_IN_GB = 1024 * BYTES_IN_MB
known_cols = num_cols + string_cols + uuid_cols + cat_cols + time_cols
if (known_cols > ncols):
raise ValueError("There can not be more specific columns then columns in total")
unknown_cols = ncols - known_cols
unknown_size = 8
unknown_requirement = unknown_cols * nrows * unknown_size
num_size = 8
num_requirement = num_cols * nrows * num_size
string_size = 128
string_requirement = string_size * string_cols * nrows
uuid_size = 16
uuid_requirement = uuid_size * uuid_cols * nrows
cat_size = 2
cat_requirement = cat_size * cat_cols * nrows
time_size = 8
time_requirement = time_size * time_cols * nrows
data_requirement = unknown_requirement + num_requirement + string_requirement + uuid_requirement + cat_requirement + time_requirement
mem_req = (BASE_MEM_REQUIREMENT_MB * BYTES_IN_MB + data_requirement) * SAFETY_FACTOR / BYTES_IN_GB
return math.ceil(mem_req)
#-----------------------------------------------------------------------------------------------------------------------
# Private
#-----------------------------------------------------------------------------------------------------------------------
def _check_connection():
if not h2oconn or not h2oconn.cluster:
raise H2OConnectionError("Not connected to a cluster. Did you run `h2o.connect()`?")
def _connect_with_conf(conn_conf):
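# Accept either an H2OConnectionConf instance or a plain dict of configuration values.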
conf = conn_conf
if isinstance(conn_conf, dict):
conf = H2OConnectionConf(config=conn_conf)
assert_is_type(conf, H2OConnectionConf)
return connect(url=conf.url, verify_ssl_certificates=conf.verify_ssl_certificates, cacert=conf.cacert,
auth=conf.auth, proxy=conf.proxy, cookies=conf.cookies, verbose=conf.verbose)
#-----------------------------------------------------------------------------------------------------------------------
# ALL DEPRECATED METHODS BELOW
#-----------------------------------------------------------------------------------------------------------------------
# Deprecated since 2015-10-08
@deprecated_fn(replaced_by=import_file)
def import_frame():
pass
# Deprecated since 2015-10-08
@deprecated_fn("Deprecated (converted to a private method).")
def parse():
"""Deprecated."""
pass
# Deprecated since 2016-08-04
@deprecated_fn("Deprecated, use ``h2o.cluster().show_status()``.")
def cluster_info():
"""Deprecated."""
_check_connection()
cluster().show_status()
# Deprecated since 2016-08-04
@deprecated_fn("Deprecated, use ``h2o.cluster().show_status(True)``.")
def cluster_status():
"""Deprecated."""
_check_connection()
cluster().show_status(True)
# Deprecated since 2016-08-04
@deprecated_fn("Deprecated, use ``h2o.cluster().shutdown()``.")
def shutdown(prompt=False):
"""Deprecated."""
_check_connection()
cluster().shutdown(prompt)
# Deprecated since 2016-08-04
@deprecated_fn("Deprecated, use ``h2o.cluster().network_test()``.")
def network_test():
"""Deprecated."""
_check_connection()
cluster().network_test()
# Deprecated since 2016-08-04
@deprecated_fn("Deprecated, use ``h2o.cluster().timezone``.")
def get_timezone():
"""Deprecated."""
_check_connection()
return cluster().timezone
# Deprecated since 2016-08-04
@deprecated_fn("Deprecated, set ``h2o.cluster().timezone`` instead.")
def set_timezone(value):
"""Deprecated."""
_check_connection()
cluster().timezone = value
# Deprecated since 2016-08-04
@deprecated_fn("Deprecated, use ``h2o.cluster().list_timezones()``.")
def list_timezones():
"""Deprecated."""
_check_connection()
return cluster().list_timezones()
| apache-2.0 |
efharkin/stim-gen | stimgen.py | 1 | 18830 | """
STIMULUS GENERATOR
Created on Tue Sep 5 10:42:19 2017
@author: Emerson
Class with built-in methods for generating commonly used stimuli and writing
them to ATF files for use with AxonInstruments hardware.
Example usage:
# Initialize the class and simulate a synaptic current.
s = Stim('Slow EPSC')
s.generate_PS(duration = 200, ampli = 10, tau_rise = 1.5, tau_decay = 15)
# Display some information about the generated waveform.
print(s)
s.plot()
# Create a set of synaptic-like currents of increasing amplitude.
s.set_replicates(5)
s.command *= np.arange(1, 6)
s.plot()
# Write the stimulus to an ATF file.
s.write_ATF()
"""
#%% IMPORT PREREQUISITE MODULES
import numpy as np
import types
import numba as nb
import matplotlib.pyplot as plt
#%% DEFINE MAIN STIM CLASS
class Stim(object):
"""
Class with built-in methods for generating commonly used stimuli and writing them to ATF files for use with AxonInstruments hardware.
Attributes:
label -- string descriptor of the class instance.
stim_type -- string descriptor of the type of stimulus.
dt -- size of the time step in ms.
command -- 2D array containing stimuli; time across rows, sweeps across cols.
time -- time support vector.
stim_params -- object containing attributes for each stim parameter for the current stim_type.
Methods:
generate_PS -- generate a synaptic current/potential-like waveform, with total amplitude defined.
generate_PS_bycharge-- generates a synaptic current/potential-like waveform, with total charge defined.
generate_OU -- generate Ornstein-Uhlenbeck noise.
set_replicates -- set the number of replicates of the stimulus.
plot -- plot the stimulus.
write_ATF -- write the stimulus to an ATF file.
Example usage:
# Initialize the class and simulate a synaptic current.
s = Stim('Slow EPSC')
s.generate_PS(duration = 200, ampli = 10, tau_rise = 1.5, tau_decay = 15)
# Display some information about the generated waveform.
print(s)
s.plot()
# Create a set of synaptic-like currents of increasing amplitude.
s.set_replicates(5)
s.command *= np.arange(1, 6)
s.plot()
# Write the stimulus to an ATF file.
s.write_ATF()
"""
### MAGIC METHODS
# Initialize class instance.
def __init__(self, label, dt=0.1):
"""Initialize self."""
self.label = label
self.stim_type = 'Empty'
self.dt = dt # Sampling interval in ms.
self.command = None # Attribute to hold the command (only current is currently supported).
self.time = None # Attribute to hold a time support vector.
self.stim_params = None # Attribute to hold stim parameters for given stim_type
# Method for unambiguous representation of Stim instance.
def __repr__(self):
"""Return repr(self)."""
if self.time is not None:
time_range = '[{}, {}]'.format(self.time[0], self.time[-1])
command_str = np.array2string(self.command)
else:
time_range = str(self.time)
command_str = str(self.command)
output_ls = [
'Stim object\n\nLabel: ', self.label, '\nStim type: ',
self.stim_type, '\nTime range (ms): ', time_range,
'\nTime step (ms):', str(self.dt), '\nStim Parameters',
vars(self.stim_params), '\nCommand:\n', command_str
]
return ''.join(output_ls)
# Pretty print self.command and some important details.
# (Called by print().)
def __str__(self):
"""
Return str(self).
"""
# Include more details about the object if it isn't empty.
if self.command is not None:
header = '{} Stim object with {} sweeps of {}s each.\n\n'.format(
self.stim_type,
self.command.shape[1],
(self.time[-1] + self.dt) / 1000  # sweep length in seconds (the time vector is in ms)
)
content = np.array2string(self.command)
footer_ls = ['Stim parameters are: ']
for key, value in vars(self.stim_params).items():
keyval_str = '\n\t{}: {}'.format(key, value)
footer_ls.append(keyval_str)
footer_ls.append('\n\n')
footer = ''.join(footer_ls)
else:
header = '{} Stim object.'.format(self.stim_type)
content = ''
footer = ''
output_ls = [str(self.label), '\n\n', header, footer, content]
return ''.join(output_ls)
### MAIN METHODS
# Generate a synaptic current-like waveform with defined amplitude
def generate_PS(self, duration, ampli, tau_rise, tau_decay):
"""
Generate a post-synaptic potential/current-like waveform.
Note that the rise and decay time constants are only good approximations of fitted rise/decay taus (which are more experimentally relevant) if the provided values are separated by at least approx. half an order of magnitude.
Inputs:
duration -- length of the simulated waveform in ms.
ampli -- peak height of the waveform.
tau_rise -- time constant of the rising phase of the waveform in ms.
tau_decay -- time constant of the falling phase of the waveform in ms.
"""
# Initialize time support vector.
offset = 500
self.time = np.arange(0, duration, self.dt)
# Generate waveform based on time constants then normalize amplitude.
waveform = np.exp(-self.time/tau_decay) - np.exp(-self.time/tau_rise)
waveform /= np.max(waveform)
waveform *= ampli
# Convert waveform into a column vector.
waveform = np.concatenate(
(np.zeros((int(offset / self.dt))), waveform), axis = 0
)
waveform = waveform[np.newaxis].T
# Compute total charge transfer using the equation AUC = ampli * (tau_decay - tau_rise). (Derived from integrating PS equation from 0 to inf)
charge = ampli * (tau_decay - tau_rise)
# Assign output.
self.time = np.arange(0, duration + offset, self.dt)
self.command = waveform
self.stim_type = "Post-synaptic current-like"
self.stim_params = types.SimpleNamespace(
tau_rise = tau_rise, tau_decay = tau_decay,
ampli = ampli, charge = charge
)
# Generate a synaptic current-like waveform with defined area under curve (total charge transfer)
def generate_PS_bycharge(self, duration, charge, tau_rise, tau_decay):
"""
Generate a post-synaptic potential/current-like waveform.
Note that the rise and decay time constants are only good approximations of fitted rise/decay taus (which are more experimentally relevant) if the provided values are separated by at least approx. half an order of magnitude.
Inputs:
duration -- length of the simulated waveform in ms.
charge -- total charge transfer in units of pA*ms.
tau_rise -- time constant of the rising phase of the waveform in ms.
tau_decay -- time constant of the falling phase of the waveform in ms.
"""
# Initialize time support vector.
offset = 500
self.time = np.arange(0, duration, self.dt)
# Generate waveform based on time constants
waveform = np.exp(-self.time/tau_decay) - np.exp(-self.time/tau_rise)
# Calculate ratio between desired and current charge and use to normalize waveform
curr_charge = tau_decay - tau_rise
scalefactor_waveform = charge / curr_charge
waveform *= scalefactor_waveform
# Convert waveform into a column vector.
waveform = np.concatenate(
(np.zeros((int(offset / self.dt))), waveform), axis = 0
)
waveform = waveform[np.newaxis].T
# Compute amplitude of PS based on charge sign
if charge > 0:
ampli = np.max(waveform)
else:
ampli = np.min(waveform)
# Assign output.
self.time = np.arange(0, duration + offset, self.dt)
self.command = waveform
self.stim_type = "Post-synaptic current-like"
self.stim_params = types.SimpleNamespace(
tau_rise = tau_rise, tau_decay = tau_decay,
ampli = ampli, charge = charge
)
# Realize OU noise and assign to self.command. (Wrapper for _gen_OU_internal.)
def generate_OU(self, duration, I0, tau, sigma0, dsigma, sin_per):
"""
Realize Ornstein-Uhlenbeck noise.
Parameters are provided to allow the noise SD to vary sinusoidally over time.
sigma[t] = sigma0 * (1 + dsigma * sin(2 * pi * t / sin_per))
Inputs:
duration -- duration of noise to realize in ms.
I0 -- mean value of the noise.
tau -- noise time constant in ms.
sigma0 -- mean SD of the noise.
dsigma -- fractional permutation of noise SD.
sin_per -- period of the sinusoidal SD permutation in ms.
"""
# Initialize support vectors.
self.time = np.arange(0, duration, self.dt)
self.command = np.zeros(self.time.shape)
S = sigma0 * (1 + dsigma * np.sin((2 * np.pi / sin_per) * self.time))
rands = np.random.standard_normal( len(self.time) )
# Perform type conversions for vectors.
self.time = self.time.astype(np.float64)
self.command = self.command.astype(np.float64)
S = S.astype(np.float64)
rands = rands.astype(np.float64)
# Perform type conversions for constants.
self.dt = np.float64(self.dt)
I0 = np.float64(I0)
tau = np.float64(tau)
# Realize noise using nb.jit-accelerated function.
noise = self._gen_OU_internal(
self.time, rands, self.dt, I0,
tau, S
)
# Convert noise to a column vector.
noise = noise[np.newaxis].T
# Assign output.
self.command = noise
self.stim_type = 'Ornstein-Uhlenbeck noise'
self.stim_params = types.SimpleNamespace(
I0 = I0, tau = tau, sigma0 = sigma0,
dsigma = dsigma, sin_per = sin_per
)
# Generate sinusoidal input
def generate_sin(self, duration, I0, ampli, period):
"""
Generate a sine wave with time-dependent amplitude and/or period.
Inputs:
duration -- duration of the wave in ms.
I0 -- offset of the wave.
ampli -- amplitude of the wave.
period -- period of the wave in ms.
Amplitude and/or period can be time-varied by passing one-dimensional vectors of length duration/dt instead of constants.
"""
# Initialize time support vector.
self.time = np.arange(0, duration, self.dt)
# Convert ampli to a vector if need be;
# otherwise check that it's the right shape.
try:
tmp = iter(ampli); del tmp # Verify that ampli is iterable.
assert len(ampli) == len(self.time)
except TypeError:
ampli = np.array([ampli] * len(self.time))
except AssertionError:
raise ValueError('len of ampli must correspond to duration.')
# Do the same with period.
try:
tmp = iter(period); del tmp # Verify that period is iterable.
assert len(period) == len(self.time)
except TypeError:
period = np.array([period] * len(self.time))
except AssertionError:
raise ValueError('len of period must correspond to duration.')
# Calculate the sine wave over time.
sinewave = I0 + ampli * np.sin((2 * np.pi / period) * self.time)
# Convert sine wave to column vector.
sinewave = sinewave[np.newaxis].T
# Assign output.
self.command = sinewave
self.stim_type = 'Sine wave'
self.stim_params = types.SimpleNamespace(
I0 = I0, ampli = ampli, period = period
)
@staticmethod
@nb.jit(nb.float64[:, :](nb.float64[:, :], nb.float64, nb.float64, nb.float64, nb.float64))
def _internal_V_integrator(input_, R, C, E, dt):
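# Forward-Euler integration of the passive membrane equation C * dV/dt = -(V - E)/R + I(t),
# run independently for each sweep (column) of the input array.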
V = np.empty_like(input_)
for i in range(input_.shape[1]):
V[0, i] = E
for t in range(1, input_.shape[0]):
dV = ((-(V[t-1, i] - E)/R + input_[t, i])) * dt/C
V[t, i] = V[t-1, i] + dV
return V
# Simulate response of RC circuit.
def simulate_RC(self, R, C, E, plot = True, verbose = True):
"""
Simulate response of RC circuit to command.
Inputs:
R: float
-- Resistance of RC circuit in MOhm
C: float
-- Capacitance of RC circuit in pF
E: float
-- Equilibrium potential/reversal poential/resting potential of the cell in mV
plot: bool (default True)
-- Plot the integrated stimulation
verbose: bool (default True)
-- Print some helpful output. Set to False to run quietly.
"""
input_ = self.command.copy() * 1e-12 # Convert pA to A
dt_ = self.dt * 1e-3
R *= 1e6 # Convert R from MOhm to Ohm
C *= 1e-12 # Convert C to F from pF
E *= 1e-3 # Convert E from mV to V
if verbose: print('tau = {}ms'.format(R * C * 1e3))
if verbose: print('Integrating voltage...')
V = self._internal_V_integrator(input_, R, C, E, dt_)
V *= 1e3
if verbose: print('Done integrating voltage!')
if plot:
if verbose: print('Plotting...')
plt.figure()
t_vec = np.arange(0, int(input_.shape[0] * self.dt), self.dt)
ax = plt.subplot(211)
plt.plot(t_vec, V, 'k-')
plt.ylabel('Voltage (mV)')
plt.xlabel('Time (ms)')
plt.subplot(212, sharex = ax)
plt.plot(t_vec, input_ * 1e12, 'k-')
plt.ylabel('Command (pA)')
plt.xlabel('Time (ms)')
plt.show()
if verbose: print('Done!')
return V
# Set number of replicates of the command array.
def set_replicates(self, reps):
"""
Set number of replicates of the existing command array.
"""
# Check that command has been initialized.
try:
assert self.command is not None
except AssertionError:
raise RuntimeError('No command array to replicate!')
# Create replicates by tiling.
self.command = np.tile(self.command, (1, reps))
self.stim_params.array_replicates = reps
# Plot command, time, and additional data.
def plot(self, **data):
"""
Plot command (and any additional data) over time.
Produces a plot of self.command over self.time as its primary output.
Additional data of interest may be plotted as supplementary plots by passing them to the function as named arguments each containing a numerical vector of the same length as self.command.
"""
d_keys = data.keys()
l_dk = len(d_keys)
plt.figure(figsize = (9, 3 + 3 * l_dk))
plt.suptitle(str(self.label))
# Plot generated noise over time.
plt.subplot(1 + l_dk, 1, 1)
plt.title('Generated stimulus')
plt.xlabel('Time (ms)')
plt.ylabel('Command')
plt.plot(self.time, self.command, '-k', linewidth = 0.5)
# Add plots from data passed as named arguments.
i = 2
for key in d_keys:
plt.subplot(1 + l_dk, 1, i)
plt.title( key )
plt.xlabel('Time (ms)')
plt.plot(self.time, data[ key ], '-k', linewidth = 0.5)
i += 1
# Final formatting and show plot.
plt.tight_layout(rect = (0, 0, 1, 0.95))
plt.show()
# Write command and time to an ATF file.
def write_ATF(self, fname = None):
"""
Write command and time to an ATF file in the current working directory.
"""
# Check whether there is any data to write.
try:
assert self.command is not None
assert self.time is not None
except AssertionError:
raise RuntimeError('Command and time must both exist!')
if fname is None:
fname = self.label + '.ATF'
elif fname[-4:].upper() != '.ATF':
fname = fname + '.ATF'
header_ls = [
'ATF1.0\n1\t{}\nType=1\nTime (ms)\t'.format(self.command.shape[1]),
*['Command (AU)\t' for sweep in range(self.command.shape[1])],
'\n'
]
header = ''.join(header_ls)
# Convert numeric arrays to strings.
str_command = self.command.astype(np.unicode_)
str_time = self.time.astype(np.unicode_)
# Initialize list to hold arrays.
content_ls = []
# Tab-delimit data one row (i.e., time step) at a time.
for t in range(len(str_time)):
tmp = str_time[t] + '\t' + '\t'.join(str_command[t, :])
content_ls.append(tmp)
# Turn the content list into one long string.
content = '\n'.join(content_ls)
# Write the header and content strings to the file.
with open(fname, 'w') as f:
f.write(header)
f.write(content)
### HIDDEN METHODS
# Fast internal method to realize OU noise. (Called by generate_OU.)
@staticmethod
@nb.jit(
nb.float64[:](
nb.float64[:], nb.float64[:], nb.float64,
nb.float64, nb.float64, nb.float64[:]
),
nopython = True
)
def _gen_OU_internal(T, rands, dt, I0, tau, sigma):
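# Euler-Maruyama update of the Ornstein-Uhlenbeck process:
#   I[t] = I[t-1] + (I0 - I[t-1]) * dt / tau + sqrt(2 * sigma[t]**2 * dt / tau) * rands[t]
# where `rands` holds standard normal deviates drawn by the caller.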
I = np.zeros(T.shape, dtype = np.float64)
I[0] = I0
for t in range(1, len(T)):
adaptive_term = (I0 - I[t - 1])
random_term = np.sqrt(2 * sigma[t]**2 * dt / tau) * rands[t]
dV = adaptive_term * dt / tau + random_term
I[t] = I[t - 1] + dV
return I
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/pylab_examples/polar_demo.py | 3 | 2332 | #!/usr/bin/env python
#
# matplotlib now has a PolarAxes class and a polar function in the
# matplotlib interface. This is considered alpha and the interface
# may change as we work out how polar axes should best be integrated
#
# The only function that has been tested on polar axes is "plot" (the
# pylab interface function "polar" calls ax.plot where ax is a
# PolarAxes) -- other axes plotting functions may work on PolarAxes
# but haven't been tested and may need tweaking.
#
# you can get a PolarSubplot instance by doing, for example
#
# subplot(211, polar=True)
#
# or a PolarAxes instance by doing
# axes([left, bottom, width, height], polar=True)
#
# The view limits (eg xlim and ylim) apply to the lower left and upper
# right of the rectangular box that surrounds the polar axes. E.g., if
# you have
#
# r = arange(0,1,0.01)
# theta = 2*pi*r
#
# the lower left corner is 5/4pi, sqrt(2) and the
# upper right corner is 1/4pi, sqrt(2)
#
# you could change the radial bounding box (zoom out) by setting the
# ylim (radial coordinate is the second argument to the plot command,
# as in MATLAB, though this is not advised currently because it is not
# clear to me how the axes should behave in the change of view limits.
# Please advise me if you have opinions. Likewise, the pan/zoom
# controls probably do not do what you think they do and are better
# left alone on polar axes. Perhaps I will disable them for polar
# axes unless we come up with a meaningful, useful and functional
# implementation for them.
#
# See the pylab rgrids and thetagrids functions for
# information on how to customize the grid locations and labels
import matplotlib
import numpy as np
from matplotlib.pyplot import figure, show, rc, grid
# radar green, solid grid lines
rc('grid', color='#316931', linewidth=1, linestyle='-')
rc('xtick', labelsize=15)
rc('ytick', labelsize=15)
# force square figure and square axes looks better for polar, IMO
width, height = matplotlib.rcParams['figure.figsize']
size = min(width, height)
# make a square figure
fig = figure(figsize=(size, size))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True, axisbg='#d5de9c')
r = np.arange(0, 3.0, 0.01)
theta = 2*np.pi*r
ax.plot(theta, r, color='#ee8d18', lw=3)
ax.set_rmax(2.0)
grid(True)
ax.set_title("And there was much rejoicing!", fontsize=20)
show()
| mit |