repo_name stringlengths 6-112 | path stringlengths 4-204 | copies stringlengths 1-3 | size stringlengths 4-6 | content stringlengths 714-810k | license stringclasses 15 values |
---|---|---|---|---|---|
wind-python/windpowerlib | tests/test_temperature.py | 1 | 1126 |
"""
SPDX-FileCopyrightText: 2019 oemof developer group <[email protected]>
SPDX-License-Identifier: MIT
"""
import pandas as pd
import numpy as np
from pandas.util.testing import assert_series_equal
from numpy.testing import assert_array_equal
from windpowerlib.temperature import linear_gradient
class TestTemperature:
def test_linear_gradient(self):
"""Test temperature as pd.Series"""
parameters = {
"temperature": pd.Series(data=[267, 268]),
"temperature_height": 2,
"hub_height": 100,
}
temp_hub_exp = pd.Series(data=[266.363, 267.36300])
assert_series_equal(linear_gradient(**parameters), temp_hub_exp)
def test_temperature_as_np_array(self):
"""Test temperature as np.array"""
parameters = {
"temperature": np.array([267, 268]),
"temperature_height": 2,
"hub_height": 100,
}
temp_hub_exp = np.array([266.363, 267.36300])
assert_array_equal(linear_gradient(**parameters), temp_hub_exp)
assert isinstance(linear_gradient(**parameters), np.ndarray)
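# --- Added example (not from windpowerlib): a minimal sketch of the relation the
# expected values above imply, assuming a constant lapse rate of -0.0065 K/m
# (266.363 == 267 - 0.0065 * (100 - 2)). The real linear_gradient may differ in detail.
def linear_gradient_sketch(temperature, temperature_height, hub_height):
    """Extrapolate temperature (K) from measurement height to hub height (m)."""
    # Works for scalars, numpy arrays and pandas Series alike via broadcasting.
    return temperature - 0.0065 * (hub_height - temperature_height)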
| mit |
yanboliang/spark | python/pyspark/sql/tests/test_pandas_udf.py | 4 | 10926 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
from pyspark.sql.types import *
from pyspark.sql.utils import ParseException
from pyspark.rdd import PythonEvalType
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
from py4j.protocol import Py4JJavaError
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class PandasUDFTests(ReusedSQLTestCase):
def test_pandas_udf_basic(self):
udf = pandas_udf(lambda x: x, DoubleType())
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]),
PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, returnType='v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_pandas_udf_decorator(self):
@pandas_udf(DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
schema = StructType([StructField("v", DoubleType())])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf('v double', PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR)
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_udf_wrong_arg(self):
with QuietTest(self.sc):
with self.assertRaises(ParseException):
@pandas_udf('blah')
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid returnType.*None'):
@pandas_udf(functionType=PandasUDFType.SCALAR)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid functionType'):
@pandas_udf('double', 100)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR)
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def zero_with_type():
return 1
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
@pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP)
def foo(k, v, w):
return k
def test_stopiteration_in_udf(self):
def foo(x):
raise StopIteration()
def foofoo(x, y):
raise StopIteration()
exc_message = "Caught StopIteration thrown from user's code; failing the task"
df = self.spark.range(0, 100)
# plain udf (test for SPARK-23754)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn('v', udf(foo)('id')).collect
)
# pandas scalar udf
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn(
'v', pandas_udf(foo, 'double', PandasUDFType.SCALAR)('id')
).collect
)
# pandas grouped map
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foofoo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
# pandas grouped agg
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').agg(
pandas_udf(foo, 'double', PandasUDFType.GROUPED_AGG)('id')
).collect
)
def test_pandas_udf_detect_unsafe_type_conversion(self):
from distutils.version import LooseVersion
import pandas as pd
import numpy as np
import pyarrow as pa
values = [1.0] * 3
pdf = pd.DataFrame({'A': values})
df = self.spark.createDataFrame(pdf).repartition(1)
@pandas_udf(returnType="int")
def udf(column):
return pd.Series(np.linspace(0, 1, 3))
# Since 0.11.0, PyArrow supports raising an error on unsafe casts.
if LooseVersion(pa.__version__) >= LooseVersion("0.11.0"):
with self.sql_conf({
"spark.sql.execution.pandas.arrowSafeTypeConversion": True}):
with self.assertRaisesRegexp(Exception,
"Exception thrown when converting pandas.Series"):
df.select(['A']).withColumn('udf', udf('A')).collect()
# Disabling Arrow safe type check.
with self.sql_conf({
"spark.sql.execution.pandas.arrowSafeTypeConversion": False}):
df.select(['A']).withColumn('udf', udf('A')).collect()
def test_pandas_udf_arrow_overflow(self):
from distutils.version import LooseVersion
import pandas as pd
import pyarrow as pa
df = self.spark.range(0, 1)
@pandas_udf(returnType="byte")
def udf(column):
return pd.Series([128])
# Arrow 0.11.0+ allows enabling or disabling safe type check.
if LooseVersion(pa.__version__) >= LooseVersion("0.11.0"):
# When enabling safe type check, Arrow 0.11.0+ disallows overflow cast.
with self.sql_conf({
"spark.sql.execution.pandas.arrowSafeTypeConversion": True}):
with self.assertRaisesRegexp(Exception,
"Exception thrown when converting pandas.Series"):
df.withColumn('udf', udf('id')).collect()
# With the safe type check disabled, Arrow performs the cast anyway.
with self.sql_conf({"spark.sql.execution.pandas.arrowSafeTypeConversion": False}):
df.withColumn('udf', udf('id')).collect()
else:
# The SQL config `arrowSafeTypeConversion` has no effect with older Arrow;
# an overflowing cast raises an error regardless.
with self.sql_conf({"spark.sql.execution.pandas.arrowSafeTypeConversion": False}):
with self.assertRaisesRegexp(Exception,
"Integer value out of bounds"):
df.withColumn('udf', udf('id')).collect()
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
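# --- Added usage sketch (not part of the test file): a minimal end-to-end use of the
# scalar pandas UDF API exercised above. Assumes a local pyspark installation with
# pandas and pyarrow; the SparkSession, function name and column alias are illustrative.
def _pandas_udf_usage_sketch():
    from pyspark.sql import SparkSession
    from pyspark.sql.functions import pandas_udf, PandasUDFType
    spark = SparkSession.builder.master("local[1]").getOrCreate()

    @pandas_udf("double", PandasUDFType.SCALAR)
    def plus_one(v):
        # A scalar pandas UDF receives and returns a pandas.Series, one Arrow batch at a time.
        return v + 1.0

    # Column "id" comes from spark.range(); the UDF is applied column-wise.
    spark.range(3).select(plus_one("id").alias("id_plus_one")).show()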
| apache-2.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/tseries/tests/test_plotting.py | 2 | 43349 |
from datetime import datetime, timedelta, date, time
import nose
from pandas.compat import lrange, zip
import numpy as np
from numpy.testing.decorators import slow
from numpy.testing import assert_array_equal
from pandas import Index, Series, DataFrame
from pandas.tseries.index import date_range, bdate_range
from pandas.tseries.offsets import DateOffset
from pandas.tseries.period import period_range, Period, PeriodIndex
from pandas.tseries.resample import DatetimeIndex
from pandas.util.testing import assert_series_equal, ensure_clean
import pandas.util.testing as tm
from pandas.tests.test_graphics import _skip_if_no_scipy_gaussian_kde
@tm.mplskip
class TestTSPlot(tm.TestCase):
def setUp(self):
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'Y']
idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']
idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]
self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
def tearDown(self):
tm.close()
@slow
def test_ts_plot_with_tz(self):
# GH2877
index = date_range('1/1/2011', periods=2, freq='H',
tz='Europe/Brussels')
ts = Series([188.5, 328.25], index=index)
_check_plot_works(ts.plot)
def test_fontsize_set_correctly(self):
# For issue #8765
import matplotlib.pyplot as plt
df = DataFrame(np.random.randn(10, 9), index=range(10))
ax = df.plot(fontsize=2)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
self.assertEqual(label.get_fontsize(), 2)
@slow
def test_frame_inferred(self):
# inferred freq
import matplotlib.pyplot as plt
idx = date_range('1/1/1987', freq='MS', periods=100)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
# axes freq
idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
# N > 1
idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1,2,3]}, idx)
ax = df.plot() # it works
self.assertEqual(len(ax.get_lines()), 1) #B was plotted
plt.close(plt.gcf())
self.assertRaises(TypeError, df['A'].plot)
@slow
def test_tsplot(self):
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
ax = plt.gca()
ts = tm.makeTimeSeries()
f = lambda *args, **kwds: tsplot(s, plt.Axes.plot, *args, **kwds)
for s in self.period_ser:
_check_plot_works(f, s.index.freq, ax=ax, series=s)
for s in self.datetime_ser:
_check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)
ax = ts.plot(style='k')
self.assertEqual((0., 0., 0.), ax.get_lines()[0].get_color())
def test_both_style_and_color(self):
import matplotlib.pyplot as plt
ts = tm.makeTimeSeries()
self.assertRaises(ValueError, ts.plot, style='b-', color='#000099')
s = ts.reset_index(drop=True)
self.assertRaises(ValueError, s.plot, style='b-', color='#000099')
@slow
def test_high_freq(self):
freaks = ['ms', 'us']
for freq in freaks:
rng = date_range('1/1/2012', periods=100000, freq=freq)
ser = Series(np.random.randn(len(rng)), rng)
_check_plot_works(ser.plot)
def test_get_datevalue(self):
from pandas.tseries.converter import get_datevalue
self.assertIsNone(get_datevalue(None, 'D'))
self.assertEqual(get_datevalue(1987, 'A'), 1987)
self.assertEqual(get_datevalue(Period(1987, 'A'), 'M'),
Period('1987-12', 'M').ordinal)
self.assertEqual(get_datevalue('1/1/1987', 'D'),
Period('1987-1-1', 'D').ordinal)
@slow
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
first_x = first_line.get_xdata()[0].ordinal
first_y = first_line.get_ydata()[0]
try:
self.assertEqual(expected_string, ax.format_coord(first_x, first_y))
except (ValueError):
raise nose.SkipTest("skipping test because issue forming test comparison GH7664")
annual = Series(1, index=date_range('2014-01-01', periods=3, freq='A-DEC'))
check_format_of_first_point(annual.plot(), 't = 2014 y = 1.000000')
# note this is added to the annual plot already in existence, and changes its freq field
daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))
check_format_of_first_point(daily.plot(), 't = 2014-01-01 y = 1.000000')
@slow
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
@slow
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
@slow
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
@slow
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
@slow
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
_check_plot_works(ser.plot, ser.index.inferred_freq)
ser = ser[[0, 3, 5, 6]]
_check_plot_works(ser.plot)
def test_fake_inferred_business(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
rng = date_range('2001-1-1', '2001-1-10')
ts = Series(lrange(len(rng)), rng)
ts = ts[:3].append(ts[5:])
ax = ts.plot()
self.assertFalse(hasattr(ax, 'freq'))
@slow
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
dr = date_range(ser.index[0], freq='BQS', periods=10)
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1),
datetime(2000, 1, 6),
datetime(2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_uhf(self):
import pandas.tseries.converter as conv
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)
df = DataFrame(np.random.randn(len(idx), 2), idx)
ax = df.plot()
axis = ax.get_xaxis()
tlocs = axis.get_ticklocs()
tlabels = axis.get_ticklabels()
for loc, label in zip(tlocs, tlabels):
xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')
rs = str(label.get_text())
if len(rs):
self.assertEqual(xp, rs)
@slow
def test_irreg_hf(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
df = DataFrame(np.random.randn(len(idx), 2), idx)
irreg = df.ix[[0, 1, 3, 4]]
ax = irreg.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1. / 24 / 60 / 60
self.assertTrue((np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all())
plt.clf()
fig.add_subplot(111)
df2 = df.copy()
df2.index = df.index.asobject
ax = df2.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
self.assertTrue((np.fabs(diffs[1:] - sec) < 1e-8).all())
def test_irregular_datetime64_repr_bug(self):
import matplotlib.pyplot as plt
ser = tm.makeTimeSeries()
ser = ser[[0, 1, 2, 7]]
fig = plt.gcf()
plt.clf()
ax = fig.add_subplot(211)
ret = ser.plot()
self.assertIsNotNone(ret)
for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
self.assertEqual(rs, xp)
def test_business_freq(self):
import matplotlib.pyplot as plt
bts = tm.makePeriodSeries()
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
bts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'B')
@slow
def test_business_freq_convert(self):
n = tm.N
tm.N = 300
bts = tm.makeTimeSeries().asfreq('BM')
tm.N = n
ts = bts.to_period('M')
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
ts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'M')
def test_nonzero_base(self):
# GH2571
idx = (date_range('2012-12-20', periods=24, freq='H') +
timedelta(minutes=30))
df = DataFrame(np.arange(24), index=idx)
ax = df.plot()
rs = ax.get_lines()[0].get_xdata()
self.assertFalse(Index(rs).is_normalized)
def test_dataframe(self):
bts = DataFrame({'a': tm.makeTimeSeries()})
ax = bts.plot()
idx = ax.get_lines()[0].get_xdata()
assert_array_equal(bts.index.to_period(), PeriodIndex(idx))
@slow
def test_axis_limits(self):
import matplotlib.pyplot as plt
def _test(ax):
xlim = ax.get_xlim()
ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(result[0], xlim[0] - 5)
self.assertEqual(result[1], xlim[1] + 10)
# string
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim('1/1/2000', '4/1/2000')
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
# datetime
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
fig = ax.get_figure()
plt.close(fig)
ser = tm.makeTimeSeries()
ax = ser.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
ax = df.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
axes = df.plot(subplots=True)
for ax in axes:
_test(ax)
def test_get_finder(self):
import pandas.tseries.converter as conv
self.assertEqual(conv.get_finder('B'), conv._daily_finder)
self.assertEqual(conv.get_finder('D'), conv._daily_finder)
self.assertEqual(conv.get_finder('M'), conv._monthly_finder)
self.assertEqual(conv.get_finder('Q'), conv._quarterly_finder)
self.assertEqual(conv.get_finder('A'), conv._annual_finder)
self.assertEqual(conv.get_finder('W'), conv._daily_finder)
@slow
def test_finder_daily(self):
import matplotlib.pyplot as plt
xp = Period('1999-1-1', freq='B').ordinal
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
for n in day_lst:
rng = bdate_range('1999-1-1', periods=n)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_quarterly(self):
import matplotlib.pyplot as plt
xp = Period('1988Q1').ordinal
yrs = [3.5, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_monthly(self):
import matplotlib.pyplot as plt
xp = Period('Jan 1988').ordinal
yrs = [1.15, 2.5, 4, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 12), freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
def test_finder_monthly_long(self):
rng = period_range('1988Q1', periods=24 * 12, freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1989Q1', 'M').ordinal
self.assertEqual(rs, xp)
@slow
def test_finder_annual(self):
import matplotlib.pyplot as plt
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
rng = period_range('1987', periods=nyears, freq='A')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, Period(xp[i], freq='A').ordinal)
plt.close(ax.get_figure())
@slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range('1/1/1999', freq='Min', periods=nminutes)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='Min').ordinal
self.assertEqual(rs, xp)
def test_finder_hourly(self):
nhours = 23
rng = date_range('1/1/1999', freq='H', periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='H').ordinal
self.assertEqual(rs, xp)
@slow
def test_gaps(self):
import matplotlib.pyplot as plt
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
ax = ts.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assert_isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
ax = ts.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assert_isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
ax = ser.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assert_isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
@slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
ax = low.plot()
idxh = date_range(low.index[0], low.index[-1], freq='12h')
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(len(ax.right_ax.get_lines()), 1)
l = lines[0]
data = l.get_xydata()
tm.assert_isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
@slow
def test_secondary_y(self):
import matplotlib.pyplot as plt
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata())
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(), 'default')
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
self.assertFalse(hasattr(ax, 'left_ax'))
self.assertTrue(hasattr(ax, 'right_ax'))
self.assertTrue(hasattr(ax2, 'left_ax'))
self.assertFalse(hasattr(ax2, 'right_ax'))
@slow
def test_secondary_y_ts(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/2000', periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata()).to_timestamp()
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(), 'default')
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
@slow
def test_secondary_kde(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
import matplotlib.pyplot as plt
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='density')
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='bar')
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'default')
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'default')
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
def test_mixed_freq_regular_first(self):
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
ax = s1.plot()
ax2 = s2.plot(style='g')
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first(self):
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
s2.plot(style='g')
ax = s1.plot()
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
assert_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
assert_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s1.plot()
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s2.plot(style='g')
ax = s1.plot(ax=ax)
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
assert_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
assert_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
@slow
def test_mixed_freq_alignment(self):
ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')
ts_data = np.random.randn(12)
ts = Series(ts_data, index=ts_ind)
ts2 = ts.asfreq('T').interpolate()
ax = ts.plot()
ts2.plot(style='r')
self.assertEqual(ax.lines[0].get_xdata()[0],
ax.lines[1].get_xdata()[0])
@slow
def test_mixed_freq_lf_first(self):
import matplotlib.pyplot as plt
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot(legend=True)
ax = high.plot(legend=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
leg = ax.get_legend()
self.assertEqual(len(leg.texts), 2)
plt.close(ax.get_figure())
idxh = date_range('1/1/1999', periods=240, freq='T')
idxl = date_range('1/1/1999', periods=4, freq='H')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'T')
def test_mixed_freq_irreg_period(self):
ts = tm.makeTimeSeries()
irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
rng = period_range('1/3/2000', periods=30, freq='B')
ps = Series(np.random.randn(len(rng)), rng)
irreg.plot()
ps.plot()
@slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W'))
@slow
def test_from_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
expected_h = idxh.to_period().asi8
expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
1553, 1558, 1562])
for l in ax.get_lines():
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W'))
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
self.assert_numpy_array_equal(xdata, expected_l)
else:
self.assert_numpy_array_equal(xdata, expected_h)
@slow
def test_from_resampling_area_line_mixed(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = DataFrame(np.random.rand(len(idxh), 3),
index=idxh, columns=[0, 1, 2])
low = DataFrame(np.random.rand(len(idxl), 3),
index=idxl, columns=[0, 1, 2])
# low to high
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
ax = low.plot(kind=kind1, stacked=True)
ax = high.plot(kind=kind2, stacked=True, ax=ax)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
1553, 1558, 1562])
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[i]
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W'))
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
# check stacked values are correct
expected_y += low[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
# check high dataframe result
expected_x = idxh.to_period().asi8
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[3 + i]
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W'))
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
expected_y += high[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
# high to low
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
ax = high.plot(kind=kind1, stacked=True)
ax = low.plot(kind=kind2, stacked=True, ax=ax)
# check high dataframe result
expected_x = idxh.to_period().asi8
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[i]
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W'))
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
expected_y += high[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
1553, 1558, 1562])
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[3 + i]
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W'))
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
expected_y += low[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
@slow
def test_mixed_freq_second_millisecond(self):
# GH 7772, GH 7760
idxh = date_range('2014-07-01 09:00', freq='S', periods=50)
idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
# high to low
high.plot()
ax = low.plot()
self.assertEqual(len(ax.get_lines()), 2)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
tm.close()
# low to high
low.plot()
ax = high.plot()
self.assertEqual(len(ax.get_lines()), 2)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
@slow
def test_irreg_dtypes(self):
# date
idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))
_check_plot_works(df.plot)
# np.datetime64
idx = date_range('1/1/2000', periods=10)
idx = idx[[0, 2, 5, 9]].asobject
df = DataFrame(np.random.randn(len(idx), 3), idx)
_check_plot_works(df.plot)
@slow
def test_time(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
ax = df.plot()
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
self.assertEqual(xp, rs)
# change xlim
ax.set_xlim('1:30', '5:00')
# check tick labels again
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
self.assertEqual(xp, rs)
@slow
def test_time_musec(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(microseconds=int(x))).time()
for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
ax = df.plot()
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
us = int((t - int(t)) * 1e6)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S.%f')
self.assertEqual(xp, rs)
@slow
def test_secondary_upsample(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot(secondary_y=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
for l in ax.left_ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
@slow
def test_secondary_legend(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
ax = fig.add_subplot(211)
# ts
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['A', 'B'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
self.assertEqual(leg.get_texts()[1].get_text(), 'B (right)')
self.assertEqual(leg.get_texts()[2].get_text(), 'C')
self.assertEqual(leg.get_texts()[3].get_text(), 'D')
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'C'], mark_right=False)
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertEqual(leg.get_texts()[0].get_text(), 'A')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
self.assertEqual(leg.get_texts()[2].get_text(), 'C')
self.assertEqual(leg.get_texts()[3].get_text(), 'D')
plt.clf()
ax = df.plot(kind='bar', secondary_y=['A'])
leg = ax.get_legend()
self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
plt.clf()
ax = df.plot(kind='bar', secondary_y=['A'], mark_right=False)
leg = ax.get_legend()
self.assertEqual(leg.get_texts()[0].get_text(), 'A')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
plt.clf()
ax = fig.add_subplot(211)
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['C', 'D'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
# non-ts
df = tm.makeDataFrame()
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'B'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['C', 'D'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
def test_format_date_axis(self):
rng = date_range('1/1/2012', periods=12, freq='M')
df = DataFrame(np.random.randn(len(rng), 3), rng)
ax = df.plot()
xaxis = ax.get_xaxis()
for l in xaxis.get_ticklabels():
if len(l.get_text()) > 0:
self.assertEqual(l.get_rotation(), 30)
@slow
def test_ax_plot(self):
import matplotlib.pyplot as plt
x = DatetimeIndex(start='2012-01-02', periods=10,
freq='D')
y = lrange(len(x))
fig = plt.figure()
ax = fig.add_subplot(111)
lines = ax.plot(x, y, label='Y')
assert_array_equal(DatetimeIndex(lines[0].get_xdata()), x)
@slow
def test_mpl_nopandas(self):
import matplotlib.pyplot as plt
dates = [date(2008, 12, 31), date(2009, 1, 31)]
values1 = np.arange(10.0, 11.0, 0.5)
values2 = np.arange(11.0, 12.0, 0.5)
kw = dict(fmt='-', lw=4)
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot_date([x.toordinal() for x in dates], values1, **kw)
ax.plot_date([x.toordinal() for x in dates], values2, **kw)
line1, line2 = ax.get_lines()
assert_array_equal(np.array([x.toordinal() for x in dates]),
line1.get_xydata()[:, 0])
assert_array_equal(np.array([x.toordinal() for x in dates]),
line2.get_xydata()[:, 0])
@slow
def test_irregular_ts_shared_ax_xlim(self):
# GH 2960
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
# plot the left section of the irregular series, then the right section
ax = ts_irregular[:5].plot()
ts_irregular[5:].plot(ax=ax)
# check that axis limits are correct
left, right = ax.get_xlim()
self.assertEqual(left, ts_irregular.index.min().toordinal())
self.assertEqual(right, ts_irregular.index.max().toordinal())
@slow
def test_secondary_y_non_ts_xlim(self):
# GH 3490 - non-timeseries with secondary y
index_1 = [1, 2, 3, 4]
index_2 = [5, 6, 7, 8]
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
ax = s1.plot()
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
self.assertTrue(right_before < right_after)
@slow
def test_secondary_y_regular_ts_xlim(self):
# GH 3490 - regular-timeseries with secondary y
index_1 = date_range(start='2000-01-01', periods=4, freq='D')
index_2 = date_range(start='2000-01-05', periods=4, freq='D')
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
ax = s1.plot()
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
self.assertTrue(right_before < right_after)
@slow
def test_secondary_y_mixed_freq_ts_xlim(self):
# GH 3490 - mixed frequency timeseries with secondary y
rng = date_range('2000-01-01', periods=10000, freq='min')
ts = Series(1, index=rng)
ax = ts.plot()
left_before, right_before = ax.get_xlim()
ts.resample('D').plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
# a downsample should not have changed either limit
self.assertEqual(left_before, left_after)
self.assertEqual(right_before, right_after)
@slow
def test_secondary_y_irregular_ts_xlim(self):
# GH 3490 - irregular-timeseries with secondary y
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
ax = ts_irregular[:5].plot()
# plot higher-x values on secondary axis
ts_irregular[5:].plot(secondary_y=True, ax=ax)
# ensure secondary limits aren't overwritten by plot on primary
ts_irregular[:5].plot(ax=ax)
left, right = ax.get_xlim()
self.assertEqual(left, ts_irregular.index.min().toordinal())
self.assertEqual(right, ts_irregular.index.max().toordinal())
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
import matplotlib.pyplot as plt
fig = plt.gcf()
try:
plt.clf()
ax = fig.add_subplot(211)
orig_ax = kwargs.pop('ax', plt.gca())
orig_axfreq = getattr(orig_ax, 'freq', None)
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
ax = kwargs.pop('ax', plt.gca())
if series is not None:
dfreq = series.index.freq
if isinstance(dfreq, DateOffset):
dfreq = dfreq.rule_code
if orig_axfreq is None:
assert ax.freq == dfreq
if freq is not None and orig_axfreq is None:
assert ax.freq == freq
ax = fig.add_subplot(212)
try:
kwargs['ax'] = ax
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
except Exception:
pass
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
plt.close(fig)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
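# --- Added sketch (not from pandas): a stripped-down, standalone version of the
# `_check_plot_works` pattern used throughout this file. The Agg backend and the
# temporary-file handling are assumptions for running headless.
def _check_plot_works_sketch():
    import os
    import tempfile
    import numpy as np
    import pandas as pd
    import matplotlib
    matplotlib.use('Agg')                 # headless backend, no display required
    import matplotlib.pyplot as plt

    ser = pd.Series(np.random.randn(30),
                    index=pd.date_range('2000-01-01', periods=30))
    ax = ser.plot()                       # plotting itself must not raise
    fd, path = tempfile.mkstemp(suffix='.png')
    os.close(fd)
    try:
        plt.savefig(path)                 # force a full draw, like ensure_clean() above
    finally:
        os.remove(path)
        plt.close(ax.get_figure())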
| mit |
emmaggie/hmmlearn | hmmlearn/tests/test_base.py | 4 | 7022 |
from __future__ import print_function
from unittest import TestCase
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from sklearn.utils.extmath import logsumexp
from hmmlearn import hmm
rng = np.random.RandomState(0)
np.seterr(all='warn')
class TestBaseHMM(TestCase):
def setUp(self):
self.prng = np.random.RandomState(9)
class StubHMM(hmm._BaseHMM):
def _compute_log_likelihood(self, X):
return self.framelogprob
def _generate_sample_from_state(self):
pass
def _init(self):
pass
def setup_example_hmm(self):
# Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
h = self.StubHMM(2)
h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
h.startprob_ = [0.5, 0.5]
framelogprob = np.log([[0.9, 0.2],
[0.9, 0.2],
[0.1, 0.8],
[0.9, 0.2],
[0.9, 0.2]])
# Add dummy observations to stub.
h.framelogprob = framelogprob
return h, framelogprob
def test_init(self):
h, framelogprob = self.setup_example_hmm()
for params in [('transmat_',), ('startprob_', 'transmat_')]:
d = dict((x[:-1], getattr(h, x)) for x in params)
h2 = self.StubHMM(h.n_components, **d)
self.assertEqual(h.n_components, h2.n_components)
for p in params:
assert_array_almost_equal(getattr(h, p), getattr(h2, p))
def test_set_startprob(self):
h, framelogprob = self.setup_example_hmm()
startprob = np.array([0.0, 1.0])
h.startprob_ = startprob
assert np.allclose(startprob, h.startprob_)
def test_set_transmat(self):
h, framelogprob = self.setup_example_hmm()
transmat = np.array([[0.8, 0.2], [0.0, 1.0]])
h.transmat_ = transmat
assert np.allclose(transmat, h.transmat_)
def test_do_forward_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, fwdlattice = h._do_forward_pass(framelogprob)
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
reffwdlattice = np.array([[0.4500, 0.1000],
[0.3105, 0.0410],
[0.0230, 0.0975],
[0.0408, 0.0150],
[0.0298, 0.0046]])
assert_array_almost_equal(np.exp(fwdlattice), reffwdlattice, 4)
def test_do_backward_pass(self):
h, framelogprob = self.setup_example_hmm()
bwdlattice = h._do_backward_pass(framelogprob)
refbwdlattice = np.array([[0.0661, 0.0455],
[0.0906, 0.1503],
[0.4593, 0.2437],
[0.6900, 0.4100],
[1.0000, 1.0000]])
assert_array_almost_equal(np.exp(bwdlattice), refbwdlattice, 4)
def test_do_viterbi_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, state_sequence = h._do_viterbi_pass(framelogprob)
refstate_sequence = [0, 0, 1, 0, 0]
assert_array_equal(state_sequence, refstate_sequence)
reflogprob = -4.4590
self.assertAlmostEqual(logprob, reflogprob, places=4)
def test_score_samples(self):
h, framelogprob = self.setup_example_hmm()
nobs = len(framelogprob)
logprob, posteriors = h.score_samples([])
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
refposteriors = np.array([[0.8673, 0.1327],
[0.8204, 0.1796],
[0.3075, 0.6925],
[0.8204, 0.1796],
[0.8673, 0.1327]])
assert_array_almost_equal(posteriors, refposteriors, decimal=4)
def test_hmm_score_samples_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
logprob, hmmposteriors = h.score_samples([])
assert_array_almost_equal(hmmposteriors.sum(axis=1), np.ones(nobs))
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
assert_array_almost_equal(hmmposteriors, gmmposteriors)
def test_hmm_decode_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
viterbi_ll, state_sequence = h.decode([])
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
gmmstate_sequence = gmmposteriors.argmax(axis=1)
assert_array_equal(state_sequence, gmmstate_sequence)
def test_base_hmm_attributes(self):
n_components = 20
startprob = self.prng.rand(n_components)
startprob = startprob / startprob.sum()
transmat = self.prng.rand(n_components, n_components)
transmat /= np.tile(transmat.sum(axis=1)
[:, np.newaxis], (1, n_components))
h = self.StubHMM(n_components)
self.assertEqual(h.n_components, n_components)
h.startprob_ = startprob
assert_array_almost_equal(h.startprob_, startprob)
self.assertRaises(ValueError, setattr, h, 'startprob_',
2 * startprob)
self.assertRaises(ValueError, setattr, h, 'startprob_', [])
self.assertRaises(ValueError, setattr, h, 'startprob_',
np.zeros((n_components - 2, 2)))
h.transmat_ = transmat
assert_array_almost_equal(h.transmat_, transmat)
self.assertRaises(ValueError, setattr, h, 'transmat_',
2 * transmat)
self.assertRaises(ValueError, setattr, h, 'transmat_', [])
self.assertRaises(ValueError, setattr, h, 'transmat_',
np.zeros((n_components - 2, n_components)))
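# --- Added sketch (not from hmmlearn): the plain forward recursion behind the
# Wikipedia example used above, with NumPy only. The reference values quoted in
# the comments come from reffwdlattice and reflogprob in the tests themselves.
def _forward_pass_sketch():
    import numpy as np
    startprob = np.array([0.5, 0.5])
    transmat = np.array([[0.7, 0.3], [0.3, 0.7]])
    frameprob = np.array([[0.9, 0.2],
                          [0.9, 0.2],
                          [0.1, 0.8],
                          [0.9, 0.2],
                          [0.9, 0.2]])
    alpha = np.zeros_like(frameprob)
    alpha[0] = startprob * frameprob[0]                        # initialisation
    for t in range(1, len(frameprob)):
        alpha[t] = alpha[t - 1].dot(transmat) * frameprob[t]   # induction step
    # First rows of alpha: [0.45, 0.10], [0.3105, 0.0410], ... (== reffwdlattice)
    logprob = np.log(alpha[-1].sum())                          # ~ -3.3725 (== reflogprob)
    return alpha, logprob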
| bsd-3-clause |
chrish42/pylearn | pylearn2/scripts/train.py | 34 | 8573 |
#!/usr/bin/env python
"""
Script implementing the logic for training pylearn2 models.
This is a "driver" that we recommend using for all but the most unusual
training experiments.
Basic usage:
.. code-block:: none
train.py yaml_file.yaml
The YAML file should contain a pylearn2 YAML description of a
`pylearn2.train.Train` object (or optionally, a list of Train objects to
run sequentially).
See `doc/yaml_tutorial` for a description of how to write the YAML syntax.
The following environment variables will be locally defined and available
for use within the YAML file:
- `PYLEARN2_TRAIN_BASE_NAME`: the name of the file within the directory
(`foo/bar.yaml` -> `bar.yaml`)
- `PYLEARN2_TRAIN_DIR`: the directory containing the YAML file
(`foo/bar.yaml` -> `foo`)
- `PYLEARN2_TRAIN_FILE_FULL_STEM`: the filepath with the file extension
stripped off
(`foo/bar.yaml` -> `foo/bar`)
- `PYLEARN2_TRAIN_FILE_STEM`: the stem of `PYLEARN2_TRAIN_BASE_NAME`
(`foo/bar.yaml` -> `bar`)
- `PYLEARN2_TRAIN_PHASE`: set to `phase1`, `phase2`, etc. during iteration
through a list of Train objects. Not defined for a single Train object.
These environment variables are especially useful for setting the save
path. For example, to make sure that `foo/bar.yaml` saves to `foo/bar.pkl`,
use
.. code-block:: none
save_path: "${PYLEARN2_TRAIN_FILE_FULL_STEM}.pkl"
This way, if you copy `foo/bar.yaml` to `foo/bar2.yaml`, the output of
`foo/bar2.yaml` won't overwrite `foo/bar.pkl`, but will automatically save
to `foo/bar2.pkl`.
For example configuration files that are consumable by this script, see
- `pylearn2/scripts/tutorials/grbm_smd`
- `pylearn2/scripts/tutorials/dbm_demo`
- `pylearn2/scripts/papers/maxout`
Use `train.py -h` to see an auto-generated description of advanced options.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow", "David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups.com"
# Standard library imports
import argparse
import gc
import logging
import os
# Third-party imports
import numpy as np
# Disable the display for the plot extension to work
# An alternative is to create another training script
if os.getenv('DISPLAY') is None:
try:
import matplotlib
matplotlib.use('Agg')
except:
pass
# Local imports
from pylearn2.utils import serial
from pylearn2.utils.logger import (
CustomStreamHandler, CustomFormatter, restore_defaults
)
class FeatureDump(object):
"""
.. todo::
WRITEME
Parameters
----------
encoder : WRITEME
dataset : WRITEME
path : WRITEME
batch_size : WRITEME
topo : WRITEME
"""
def __init__(self, encoder, dataset, path, batch_size=None, topo=False):
"""
.. todo::
WRITEME
"""
self.encoder = encoder
self.dataset = dataset
self.path = path
self.batch_size = batch_size
self.topo = topo
def main_loop(self, **kwargs):
"""
.. todo::
WRITEME
Parameters
----------
**kwargs : dict, optional
WRITEME
"""
if self.batch_size is None:
if self.topo:
data = self.dataset.get_topological_view()
else:
data = self.dataset.get_design_matrix()
output = self.encoder.perform(data)
else:
myiterator = self.dataset.iterator(mode='sequential',
batch_size=self.batch_size,
topo=self.topo)
chunks = []
for data in myiterator:
chunks.append(self.encoder.perform(data))
output = np.concatenate(chunks)
np.save(self.path, output)
def make_argument_parser():
"""
Creates an ArgumentParser to read the options for this script from
sys.argv
"""
parser = argparse.ArgumentParser(
description="Launch an experiment from a YAML configuration file.",
epilog='\n'.join(__doc__.strip().split('\n')[1:]).strip(),
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('--level-name', '-L',
action='store_true',
help='Display the log level (e.g. DEBUG, INFO) '
'for each logged message')
parser.add_argument('--timestamp', '-T',
action='store_true',
help='Display human-readable timestamps for '
'each logged message')
parser.add_argument('--time-budget', '-t', type=int,
help='Time budget in seconds. Stop training at '
'the end of an epoch if more than this '
'number of seconds has elapsed.')
parser.add_argument('--verbose-logging', '-V',
action='store_true',
help='Display timestamp, log level and source '
'logger for every logged message '
'(implies -T).')
parser.add_argument('--debug', '-D',
action='store_true',
help='Display any DEBUG-level log messages, '
'suppressed by default.')
parser.add_argument('config', action='store',
choices=None,
help='A YAML configuration file specifying the '
'training procedure')
return parser
def train(config, level_name=None, timestamp=None, time_budget=None,
verbose_logging=None, debug=None):
"""
Trains a given YAML file.
Parameters
----------
config : str
A YAML configuration file specifying the
training procedure.
level_name : bool, optional
Display the log level (e.g. DEBUG, INFO)
for each logged message.
timestamp : bool, optional
Display human-readable timestamps for
each logged message.
time_budget : int, optional
Time budget in seconds. Stop training at
the end of an epoch if more than this
number of seconds has elapsed.
verbose_logging : bool, optional
Display timestamp, log level and source
logger for every logged message
(implies timestamp and level_name are True).
debug : bool, optional
Display any DEBUG-level log messages,
False by default.
"""
train_obj = serial.load_train_file(config)
try:
iter(train_obj)
iterable = True
except TypeError:
iterable = False
# Undo our custom logging setup.
restore_defaults()
# Set up the root logger with a custom handler that logs stdout for INFO
# and DEBUG and stderr for WARNING, ERROR, CRITICAL.
root_logger = logging.getLogger()
if verbose_logging:
formatter = logging.Formatter(fmt="%(asctime)s %(name)s %(levelname)s "
"%(message)s")
handler = CustomStreamHandler(formatter=formatter)
else:
if timestamp:
prefix = '%(asctime)s '
else:
prefix = ''
formatter = CustomFormatter(prefix=prefix, only_from='pylearn2')
handler = CustomStreamHandler(formatter=formatter)
root_logger.addHandler(handler)
# Set the root logger level.
if debug:
root_logger.setLevel(logging.DEBUG)
else:
root_logger.setLevel(logging.INFO)
if iterable:
for number, subobj in enumerate(iter(train_obj)):
# Publish a variable indicating the training phase.
phase_variable = 'PYLEARN2_TRAIN_PHASE'
phase_value = 'phase%d' % (number + 1)
os.environ[phase_variable] = phase_value
# Execute this training phase.
subobj.main_loop(time_budget=time_budget)
# Clean up, in case there's a lot of memory used that's
# necessary for the next phase.
del subobj
gc.collect()
else:
train_obj.main_loop(time_budget=time_budget)
if __name__ == "__main__":
"""
See module-level docstring for a description of the script.
"""
parser = make_argument_parser()
args = parser.parse_args()
train(args.config, args.level_name, args.timestamp, args.time_budget,
args.verbose_logging, args.debug)
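# --- Added sketch (not part of train.py): how the environment variables described in
# the module docstring relate to a YAML path. The path 'foo/bar.yaml' is illustrative.
def _train_env_vars_sketch(config='foo/bar.yaml'):
    import os
    base_name = os.path.basename(config)          # PYLEARN2_TRAIN_BASE_NAME      -> 'bar.yaml'
    train_dir = os.path.dirname(config)           # PYLEARN2_TRAIN_DIR            -> 'foo'
    full_stem = os.path.splitext(config)[0]       # PYLEARN2_TRAIN_FILE_FULL_STEM -> 'foo/bar'
    stem = os.path.splitext(base_name)[0]         # PYLEARN2_TRAIN_FILE_STEM      -> 'bar'
    return base_name, train_dir, full_stem, stem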
| bsd-3-clause |
mdiaz236/DeepLearningFoundations | weight-initialization/helper.py | 153 | 3649 |
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values)//2))
plt.show()
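# Illustrative usage sketch (assumes TensorFlow 1.x; the example tensor below
# is not part of the original helper):
#
#     normal_dist = tf.random_normal([1000], mean=0.0, stddev=1.0)
#     hist_dist('Random Normal (mean=0.0, stddev=1.0)', normal_dist)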
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Measurements used for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
    assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
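# Illustrative usage sketch (assumes the TF 1.x MNIST reader from
# tensorflow.examples.tutorials.mnist; the variable names and weight shapes
# below are examples chosen to match _get_loss_acc, not part of this helper):
#
#     from tensorflow.examples.tutorials.mnist import input_data
#     mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
#     all_zero_weights = [
#         tf.Variable(tf.zeros([784, 256])),
#         tf.Variable(tf.zeros([256, 128])),
#         tf.Variable(tf.zeros([128, 10]))]
#     compare_init_weights(mnist, 'All Zeros Initialization',
#                          [(all_zero_weights, 'All Zeros')])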
| mit |
| fengzhyuan/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 162 | 7103 |
"""
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
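# Illustrative example of the tokenizer's behaviour (not part of the original
# script):
#
#     number_aware_tokenizer("Sold 25 bikes for 900 dollars")
#     # -> ['Sold', '#NUMBER', 'bikes', 'for', '#NUMBER', 'dollars']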
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
weight = X[rows[:, np.newaxis], cols].sum()
cut = (X[row_complement[:, np.newaxis], cols].sum() +
X[rows[:, np.newaxis], col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
| AkademieOlympia/sympy | sympy/utilities/runtests.py | 34 | 81153 |
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
import time
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
from inspect import isgeneratorfunction
from sympy.core.cache import clear_cache
from sympy.core.compatibility import exec_, PY3, string_types, range
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
IS_WINDOWS = (os.name == 'nt')
class Skipped(Exception):
pass
import __future__
# add more flags ??
future_flags = __future__.division.compiler_flag
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in ``s``, and return the result.
If the string ``s`` is Unicode, it is encoded using the stdout
encoding and the ``backslashreplace`` error handler.
"""
# After a 2to3 run the below code is bogus, so wrap it with a version check
if not PY3:
if isinstance(s, unicode):
s = s.encode(pdoctest._encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
pdoctest._indent = _indent
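# Illustrative example of the helper above (not part of the original module):
#
#     _indent("first line\nsecond line", indent=2)
#     # -> '  first line\n  second line'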
# override reporter to maintain Windows and Python 3 compatibility
def _report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
s = self._checker.output_difference(example, got, self.optionflags)
s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
out(self._failure_header(test, example) + s)
if PY3 and IS_WINDOWS:
DocTestRunner.report_failure = _report_failure
def convert_to_native_paths(lst):
"""
Converts a list of '/' separated paths into a list of
native (os.sep separated) paths and converts to lowercase
if the system is case insensitive.
"""
newlst = []
for i, rv in enumerate(lst):
rv = os.path.join(*rv.split("/"))
# on windows the slash after the colon is dropped
if sys.platform == "win32":
pos = rv.find(':')
if pos != -1:
if rv[pos + 1] != '\\':
rv = rv[:pos + 1] + '\\' + rv[pos + 1:]
newlst.append(sys_normcase(rv))
return newlst
def get_sympy_dir():
"""
    Returns the root sympy directory and sets the global value
indicating whether the system is case sensitive or not.
"""
global sys_case_insensitive
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
sys_case_insensitive = (os.path.isdir(sympy_dir) and
os.path.isdir(sympy_dir.lower()) and
os.path.isdir(sympy_dir.upper()))
return sys_normcase(sympy_dir)
def sys_normcase(f):
if sys_case_insensitive: # global defined after call to get_sympy_dir()
return f.lower()
return f
def setup_pprint():
from sympy import pprint_use_unicode, init_printing
# force pprint to be in ascii mode in doctests
pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
init_printing(pretty_print=False)
def run_in_subprocess_with_hash_randomization(function, function_args=(),
function_kwargs={}, command=sys.executable,
module='sympy.utilities.runtests', force=False):
"""
Run a function in a Python subprocess with hash randomization enabled.
If hash randomization is not supported by the version of Python given, it
returns False. Otherwise, it returns the exit value of the command. The
function is passed to sys.exit(), so the return value of the function will
be the return value.
The environment variable PYTHONHASHSEED is used to seed Python's hash
randomization. If it is set, this function will return False, because
starting a new subprocess is unnecessary in that case. If it is not set,
one is set at random, and the tests are run. Note that if this
environment variable is set when Python starts, hash randomization is
automatically enabled. To force a subprocess to be created even if
PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a
subprocess in Python versions that do not support hash randomization (see
below), because those versions of Python do not support the ``-R`` flag.
``function`` should be a string name of a function that is importable from
the module ``module``, like "_test". The default for ``module`` is
"sympy.utilities.runtests". ``function_args`` and ``function_kwargs``
should be a repr-able tuple and dict, respectively. The default Python
command is sys.executable, which is the currently running Python command.
This function is necessary because the seed for hash randomization must be
set by the environment variable before Python starts. Hence, in order to
use a predetermined seed for tests, we must start Python in a separate
subprocess.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
Examples
========
>>> from sympy.utilities.runtests import (
... run_in_subprocess_with_hash_randomization)
>>> # run the core tests in verbose mode
>>> run_in_subprocess_with_hash_randomization("_test",
... function_args=("core",),
... function_kwargs={'verbose': True}) # doctest: +SKIP
# Will return 0 if sys.executable supports hash randomization and tests
# pass, 1 if they fail, and False if it does not support hash
# randomization.
"""
# Note, we must return False everywhere, not None, as subprocess.call will
# sometimes return None.
# First check if the Python version supports hash randomization
    # If it doesn't have this support, it won't recognize the -R flag
p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode != 0:
return False
hash_seed = os.getenv("PYTHONHASHSEED")
if not hash_seed:
os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
else:
if not force:
return False
# Now run the command
commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
(module, function, function, repr(function_args),
repr(function_kwargs)))
try:
p = subprocess.Popen([command, "-R", "-c", commandstring])
p.communicate()
except KeyboardInterrupt:
p.wait()
finally:
# Put the environment variable back, so that it reads correctly for
# the current Python process.
if hash_seed is None:
del os.environ["PYTHONHASHSEED"]
else:
os.environ["PYTHONHASHSEED"] = hash_seed
return p.returncode
def run_all_tests(test_args=(), test_kwargs={}, doctest_args=(),
doctest_kwargs={}, examples_args=(), examples_kwargs={'quiet': True}):
"""
Run all tests.
Right now, this runs the regular tests (bin/test), the doctests
(bin/doctest), the examples (examples/all.py), and the sage tests (see
sympy/external/tests/test_sage.py).
This is what ``setup.py test`` uses.
You can pass arguments and keyword arguments to the test functions that
support them (for now, test, doctest, and the examples). See the
docstrings of those functions for a description of the available options.
For example, to run the solvers tests with colors turned off:
>>> from sympy.utilities.runtests import run_all_tests
>>> run_all_tests(test_args=("solvers",),
    ... test_kwargs={"colors": False}) # doctest: +SKIP
"""
tests_successful = True
try:
# Regular tests
if not test(*test_args, **test_kwargs):
# some regular test fails, so set the tests_successful
# flag to false and continue running the doctests
tests_successful = False
# Doctests
print()
if not doctest(*doctest_args, **doctest_kwargs):
tests_successful = False
# Examples
print()
sys.path.append("examples")
from all import run_examples # examples/all.py
if not run_examples(*examples_args, **examples_kwargs):
tests_successful = False
# Sage tests
if not (sys.platform == "win32" or PY3):
# run Sage tests; Sage currently doesn't support Windows or Python 3
dev_null = open(os.devnull, 'w')
if subprocess.call("sage -v", shell=True, stdout=dev_null,
stderr=dev_null) == 0:
if subprocess.call("sage -python bin/test "
"sympy/external/tests/test_sage.py", shell=True) != 0:
tests_successful = False
if tests_successful:
return
else:
# Return nonzero exit code
sys.exit(1)
except KeyboardInterrupt:
print()
print("DO *NOT* COMMIT!")
sys.exit(1)
def test(*paths, **kwargs):
"""
Run tests in the specified test_*.py files.
Tests in a particular test_*.py file are run if any of the given strings
in ``paths`` matches a part of the test file's path. If ``paths=[]``,
tests in all test_*.py files are run.
Notes:
- If sort=False, tests are run in random order (not default).
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
**Explanation of test results**
====== ===============================================================
Output Meaning
====== ===============================================================
. passed
F failed
X XPassed (expected to fail but passed)
f XFAILed (expected to fail and indeed failed)
s skipped
w slow
T timeout (e.g., when ``--timeout`` is used)
K KeyboardInterrupt (when running the slow tests with ``--slow``,
you can interrupt one of them without killing the test runner)
====== ===============================================================
Colors have no additional meaning and are used just to facilitate
interpreting the output.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.test() # doctest: +SKIP
Run one file:
>>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP
>>> sympy.test("_basic") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... "sympy/functions") # doctest: +SKIP
Run all tests in sympy/core and sympy/utilities:
>>> sympy.test("/core", "/util") # doctest: +SKIP
Run specific test from a file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... kw="test_equality") # doctest: +SKIP
Run specific test from any file:
>>> sympy.test(kw="subs") # doctest: +SKIP
Run the tests with verbose mode on:
>>> sympy.test(verbose=True) # doctest: +SKIP
Don't sort the test output:
>>> sympy.test(sort=False) # doctest: +SKIP
Turn on post-mortem pdb:
>>> sympy.test(pdb=True) # doctest: +SKIP
Turn off colors:
>>> sympy.test(colors=False) # doctest: +SKIP
Force colors, even when the output is not to a terminal (this is useful,
e.g., if you are piping to ``less -r`` and you still want colors)
    >>> sympy.test(force_colors=True) # doctest: +SKIP
The traceback verboseness can be set to "short" or "no" (default is
"short")
>>> sympy.test(tb='no') # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. For instance, to run the first half of the test suite:
>>> sympy.test(split='1/2') # doctest: +SKIP
You can disable running the tests in a separate subprocess using
``subprocess=False``. This is done to support seeding hash randomization,
which is enabled by default in the Python versions where it is supported.
If subprocess=False, hash randomization is enabled/disabled according to
whether it has been enabled or not in the calling Python process.
However, even if it is enabled, the seed cannot be printed unless it is
called from a new Python process.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
If hash randomization is not supported ``subprocess=False`` is used
automatically.
>>> sympy.test(subprocess=False) # doctest: +SKIP
To set the hash randomization seed, set the environment variable
``PYTHONHASHSEED`` before running the tests. This can be done from within
Python using
>>> import os
>>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP
Or from the command line using
$ PYTHONHASHSEED=42 ./bin/test
If the seed is not set, a random seed will be chosen.
Note that to reproduce the same hash values, you must use both the same seed
as well as the same architecture (32-bit vs. 64-bit).
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_test",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_test(*paths, **kwargs))
if not val or i == 0:
return val
def _test(*paths, **kwargs):
"""
Internal function that actually runs the tests.
All keyword arguments from ``test()`` are passed to this function except for
``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstring of
``test()`` for more information.
"""
verbose = kwargs.get("verbose", False)
tb = kwargs.get("tb", "short")
kw = kwargs.get("kw", None) or ()
# ensure that kw is a tuple
if isinstance(kw, str):
kw = (kw, )
post_mortem = kwargs.get("pdb", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
sort = kwargs.get("sort", True)
seed = kwargs.get("seed", None)
if seed is None:
seed = random.randrange(100000000)
timeout = kwargs.get("timeout", False)
slow = kwargs.get("slow", False)
enhance_asserts = kwargs.get("enhance_asserts", False)
split = kwargs.get('split', None)
blacklist = kwargs.get('blacklist', [])
blacklist = convert_to_native_paths(blacklist)
fast_threshold = kwargs.get('fast_threshold', None)
slow_threshold = kwargs.get('slow_threshold', None)
r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
force_colors=force_colors, split=split)
t = SymPyTests(r, kw, post_mortem, seed,
fast_threshold=fast_threshold,
slow_threshold=slow_threshold)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
test_files = t.get_test_files('sympy')
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if slow:
# Seed to evenly shuffle slow tests among splits
random.seed(41992450)
random.shuffle(matched)
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
return int(not t.test(sort=sort, timeout=timeout,
slow=slow, enhance_asserts=enhance_asserts))
def doctest(*paths, **kwargs):
"""
Runs doctests in all \*.py files in the sympy directory which match
any of the given strings in ``paths`` or all tests if paths=[].
Notes:
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.doctest() # doctest: +SKIP
Run one file:
>>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
>>> sympy.doctest("polynomial.rst") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP
Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:
>>> sympy.doctest("polynomial") # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. Note that the regular doctests and the Sphinx
doctests are split independently. For instance, to run the first half of
the test suite:
>>> sympy.doctest(split='1/2') # doctest: +SKIP
The ``subprocess`` and ``verbose`` options are the same as with the function
``test()``. See the docstring of that function for more information.
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_doctest",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_doctest(*paths, **kwargs))
if not val or i == 0:
return val
def _doctest(*paths, **kwargs):
"""
Internal function that actually runs the doctests.
All keyword arguments from ``doctest()`` are passed to this function
except for ``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstrings of
``doctest()`` and ``test()`` for more information.
"""
normal = kwargs.get("normal", False)
verbose = kwargs.get("verbose", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
blacklist = kwargs.get("blacklist", [])
split = kwargs.get('split', None)
blacklist.extend([
"doc/src/modules/plotting.rst", # generates live plots
"sympy/utilities/compilef.py", # needs tcc
"sympy/physics/gaussopt.py", # raises deprecation warning
])
if import_module('numpy') is None:
blacklist.extend([
"sympy/plotting/experimental_lambdify.py",
"sympy/plotting/plot_implicit.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py",
"examples/intermediate/sample.py",
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py",
"doc/src/modules/numeric-computation.rst"
])
else:
if import_module('matplotlib') is None:
blacklist.extend([
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py"
])
else:
# don't display matplotlib windows
from sympy.plotting.plot import unset_show
unset_show()
if import_module('pyglet') is None:
blacklist.extend(["sympy/plotting/pygletplot"])
if import_module('theano') is None:
blacklist.extend(["doc/src/modules/numeric-computation.rst"])
# disabled because of doctest failures in asmeurer's bot
blacklist.extend([
"sympy/utilities/autowrap.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py"
])
# blacklist these modules until issue 4840 is resolved
blacklist.extend([
"sympy/conftest.py",
"sympy/utilities/benchmarking.py"
])
blacklist = convert_to_native_paths(blacklist)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
    r = PyTestReporter(verbose, split=split, colors=colors,
force_colors=force_colors)
t = SymPyDocTests(r, normal)
test_files = t.get_test_files('sympy')
test_files.extend(t.get_test_files('examples', init_only=False))
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# take only what was requested...but not blacklisted items
# and allow for partial match anywhere or fnmatch of name
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
# run the tests and record the result for this *py portion of the tests
if t._testfiles:
failed = not t.test()
else:
failed = False
# N.B.
# --------------------------------------------------------------------
# Here we test *.rst files at or below doc/src. Code from these must
# be self supporting in terms of imports since there is no importing
# of necessary modules by doctest.testfile. If you try to pass *.py
# files through this they might fail because they will lack the needed
# imports and smarter parsing that can be done with source code.
#
test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
test_files.sort()
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# Take only what was requested as long as it's not on the blacklist.
# Paths were already made native in *py tests so don't repeat here.
# There's no chance of having a *py file slip through since we
# only have *rst files in test_files.
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
setup_pprint()
first_report = True
for rst_file in matched:
if not os.path.isfile(rst_file):
continue
old_displayhook = sys.displayhook
try:
out = sympytestfile(
rst_file, module_relative=False, encoding='utf-8',
optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
finally:
# make sure we return to the original displayhook in case some
# doctest has changed that
sys.displayhook = old_displayhook
rstfailed, tested = out
if tested:
failed = rstfailed or failed
if first_report:
first_report = False
msg = 'rst doctests start'
if not t._testfiles:
r.start(msg=msg)
else:
r.write_center(msg)
print()
# use as the id, everything past the first 'sympy'
file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
print(file_id, end=" ")
            # get at least the name out so it is known who is being tested
wid = r.terminal_width - len(file_id) - 1 # update width
test_file = '[%s]' % (tested)
report = '[%s]' % (rstfailed or 'OK')
print(''.join(
[test_file, ' '*(wid - len(test_file) - len(report)), report])
)
# the doctests for *py will have printed this message already if there was
# a failure, so now only print it if there was intervening reporting by
# testing the *rst as evidenced by first_report no longer being True.
if not first_report and failed:
print()
print("DO *NOT* COMMIT!")
return int(failed)
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
def split_list(l, split):
"""
Splits a list into part a of b
split should be a string of the form 'a/b'. For instance, '1/3' would give
the split one of three.
If the length of the list is not divisible by the number of splits, the
last split will have more items.
>>> from sympy.utilities.runtests import split_list
>>> a = list(range(10))
>>> split_list(a, '1/3')
[0, 1, 2]
>>> split_list(a, '2/3')
[3, 4, 5]
>>> split_list(a, '3/3')
[6, 7, 8, 9]
"""
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b where a and b are ints")
i, t = map(int, m.groups())
return l[(i - 1)*len(l)//t:i*len(l)//t]
from collections import namedtuple
SymPyTestResults = namedtuple('TestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False,
parser=pdoctest.DocTestParser(), encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg ``module_relative`` specifies how filenames
should be interpreted:
- If ``module_relative`` is True (the default), then ``filename``
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
``package`` argument is specified, then it is relative to that
package. To ensure os-independence, ``filename`` should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If ``module_relative`` is False, then ``filename`` specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg ``name`` gives the name of the test; by default
use the file's basename.
Optional keyword argument ``package`` is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify ``package`` if ``module_relative`` is False.
Optional keyword arg ``globs`` gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg ``extraglobs`` gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg ``verbose`` prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg ``report`` prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg ``optionflags`` or's together module constants,
and defaults to 0. Possible values (see the docs for details):
- DONT_ACCEPT_TRUE_FOR_1
- DONT_ACCEPT_BLANKLINE
- NORMALIZE_WHITESPACE
- ELLIPSIS
- SKIP
- IGNORE_EXCEPTION_DETAIL
- REPORT_UDIFF
- REPORT_CDIFF
- REPORT_NDIFF
- REPORT_ONLY_FIRST_FAILURE
Optional keyword arg ``raise_on_error`` raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg ``parser`` specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg ``encoding`` specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if not PY3:
text, filename = pdoctest._load_testfile(
filename, package, module_relative)
if encoding is not None:
text = text.decode(encoding)
else:
text, filename = pdoctest._load_testfile(
filename, package, module_relative, encoding)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
runner._checker = SymPyOutputChecker()
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test, compileflags=future_flags)
if report:
runner.summarize()
if pdoctest.master is None:
pdoctest.master = runner
else:
pdoctest.master.merge(runner)
return SymPyTestResults(runner.failures, runner.tries)
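# Illustrative usage sketch (the .rst path below is hypothetical, chosen to
# mirror how _doctest() calls this function):
#
#     failed, attempted = sympytestfile(
#         'doc/src/tutorial/intro.rst', module_relative=False,
#         encoding='utf-8', optionflags=pdoctest.ELLIPSIS)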
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False,
seed=None, fast_threshold=None, slow_threshold=None):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._testfiles = []
self._seed = seed if seed is not None else random.random()
# Defaults in seconds, from human / UX design limits
# http://www.nngroup.com/articles/response-times-3-important-limits/
#
# These defaults are *NOT* set in stone as we are measuring different
# things, so others feel free to come up with a better yardstick :)
if fast_threshold:
self._fast_threshold = float(fast_threshold)
else:
self._fast_threshold = 0.1
if slow_threshold:
self._slow_threshold = float(slow_threshold)
else:
self._slow_threshold = 10
def test(self, sort=False, timeout=False, slow=False, enhance_asserts=False):
"""
Runs the tests returning True if all tests pass, otherwise False.
If sort=False run tests in random order.
"""
if sort:
self._testfiles.sort()
elif slow:
pass
else:
random.seed(self._seed)
random.shuffle(self._testfiles)
self._reporter.start(self._seed)
for f in self._testfiles:
try:
self.test_file(f, sort, timeout, slow, enhance_asserts)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def _enhance_asserts(self, source):
from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)
ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
"Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
"In": 'in', "NotIn": 'not in'}
class Transform(NodeTransformer):
def visit_Assert(self, stmt):
if isinstance(stmt.test, Compare):
compare = stmt.test
values = [compare.left] + compare.comparators
names = [ "_%s" % i for i, _ in enumerate(values) ]
names_store = [ Name(n, Store()) for n in names ]
names_load = [ Name(n, Load()) for n in names ]
target = Tuple(names_store, Store())
value = Tuple(values, Load())
assign = Assign([target], value)
new_compare = Compare(names_load[0], compare.ops, names_load[1:])
msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
return [assign, test]
else:
return stmt
tree = parse(source)
new_tree = Transform().visit(tree)
return fix_missing_locations(new_tree)
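    # Illustrative sketch of the rewrite performed above (variable names are
    # internal details, shown only to document the idea): an assert such as
    #
    #     assert f(x) == g(x)
    #
    # becomes roughly
    #
    #     (_0, _1) = (f(x), g(x))
    #     assert _0 == _1, "\n%s ==\n%s" % (_0, _1)
    #
    # so a failing assert reports both compared values.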
def test_file(self, filename, sort=True, timeout=False, slow=False, enhance_asserts=False):
reporter = self._reporter
funcs = []
try:
gl = {'__file__': filename}
try:
if PY3:
open_file = lambda: open(filename, encoding="utf8")
else:
open_file = lambda: open(filename)
with open_file() as f:
source = f.read()
if self._kw:
for l in source.splitlines():
if l.lstrip().startswith('def '):
if any(l.find(k) != -1 for k in self._kw):
break
else:
return
if enhance_asserts:
try:
source = self._enhance_asserts(source)
except ImportError:
pass
code = compile(source, filename, "exec")
exec_(code, gl)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
reporter.import_error(filename, sys.exc_info())
return
clear_cache()
self._count += 1
random.seed(self._seed)
pytestfile = ""
if "XFAIL" in gl:
pytestfile = inspect.getsourcefile(gl["XFAIL"])
pytestfile2 = ""
if "slow" in gl:
pytestfile2 = inspect.getsourcefile(gl["slow"])
disabled = gl.get("disabled", False)
if not disabled:
# we need to filter only those functions that begin with 'test_'
# that are defined in the testing file or in the file where
                # the XFAIL decorator is defined
funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
(inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and
(inspect.getsourcefile(gl[f]) == filename or
inspect.getsourcefile(gl[f]) == pytestfile or
inspect.getsourcefile(gl[f]) == pytestfile2)]
if slow:
funcs = [f for f in funcs if getattr(f, '_slow', False)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i < len(funcs):
if isgeneratorfunction(funcs[i]):
# some tests can be generators, that return the actual
# test functions. We unpack it below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
fgw = lambda: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
if not funcs:
return
except Exception:
reporter.entering_filename(filename, len(funcs))
raise
reporter.entering_filename(filename, len(funcs))
if not sort:
random.shuffle(funcs)
for f in funcs:
start = time.time()
reporter.entering_test(f)
try:
if getattr(f, '_slow', False) and not slow:
raise Skipped("Slow")
if timeout:
self._timeout(f, timeout)
else:
random.seed(self._seed)
f()
except KeyboardInterrupt:
if getattr(f, '_slow', False):
reporter.test_skip("KeyboardInterrupt")
else:
raise
except Exception:
if timeout:
signal.alarm(0) # Disable the alarm. It could not be handled before.
t, v, tr = sys.exc_info()
if t is AssertionError:
reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
reporter.test_skip(v)
elif t.__name__ == "XFail":
reporter.test_xfail()
elif t.__name__ == "XPass":
reporter.test_xpass(v)
else:
reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
reporter.test_pass()
taken = time.time() - start
if taken > self._slow_threshold:
reporter.slow_test_functions.append((f.__name__, taken))
if getattr(f, '_slow', False) and slow:
if taken < self._fast_threshold:
reporter.fast_test_functions.append((f.__name__, taken))
reporter.leaving_filename()
def _timeout(self, function, timeout):
def callback(x, y):
signal.alarm(0)
raise Skipped("Timeout")
signal.signal(signal.SIGALRM, callback)
signal.alarm(timeout) # Set an alarm with a given timeout
function()
signal.alarm(0) # Disable the alarm
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if not self._kw:
return True
for kw in self._kw:
if x.__name__.find(kw) != -1:
return True
return False
def get_test_files(self, dir, pat='test_*.py'):
"""
Returns the list of test_*.py (default) files at or below directory
``dir`` relative to the sympy home directory.
"""
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
return sorted([sys_normcase(gi) for gi in g])
class SymPyDocTests(object):
def __init__(self, reporter, normal):
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._normal = normal
self._testfiles = []
def test(self):
"""
Runs the tests and returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._testfiles:
try:
self.test_file(f)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def test_file(self, filename):
clear_cache()
from sympy.core.compatibility import StringIO
rel_name = filename[len(self._root_dir) + 1:]
dirname, file = os.path.split(filename)
module = rel_name.replace(os.sep, '.')[:-3]
if rel_name.startswith("examples"):
# Examples files do not have __init__.py files,
# So we have to temporarily extend sys.path to import them
sys.path.insert(0, dirname)
module = file[:-3] # remove ".py"
setup_pprint()
try:
module = pdoctest._normalize_module(module)
tests = SymPyDocTestFinder().find(module)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
finally:
if rel_name.startswith("examples"):
del sys.path[0]
tests = [test for test in tests if len(test.examples) > 0]
# By default tests are sorted by alphabetical order by function name.
# We sort by line number so one can edit the file sequentially from
# bottom to top. However, if there are decorated functions, their line
# numbers will be too large and for now one must just search for these
# by text and function name.
tests.sort(key=lambda x: -x.lineno)
if not tests:
return
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
# check if there are external dependencies which need to be met
if '_doctest_depends_on' in test.globs:
if not self._process_dependencies(test.globs['_doctest_depends_on']):
self._reporter.test_skip()
continue
runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
runner._checker = SymPyOutputChecker()
old = sys.stdout
new = StringIO()
sys.stdout = new
# If the testing is normal, the doctests get importing magic to
# provide the global namespace. If not normal (the default) then
            # they must run on their own; all imports must be explicit within
# a function's docstring. Once imported that import will be
# available to the rest of the tests in a given function's
# docstring (unless clear_globs=True below).
if not self._normal:
test.globs = {}
# if this is uncommented then all the test would get is what
# comes by default with a "from sympy import *"
#exec('from sympy import *') in test.globs
test.globs['print_function'] = print_function
try:
f, t = runner.run(test, compileflags=future_flags,
out=new.write, clear_globs=False)
except KeyboardInterrupt:
raise
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_test_files(self, dir, pat='*.py', init_only=True):
"""
Returns the list of \*.py files (default) from which docstrings
will be tested which are at or below directory ``dir``. By default,
only those that have an __init__.py in their parent directory
and do not start with ``test_`` will be included.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs).
"""
init_py = os.path.join(os.path.dirname(x), "__init__.py")
return os.path.exists(init_py)
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files
if not f.startswith('test_') and fnmatch(f, pat)])
if init_only:
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return [sys_normcase(gi) for gi in g]
def _process_dependencies(self, deps):
"""
        Returns ``False`` if some dependencies are not met and the test should
        be skipped, otherwise returns ``True``.
"""
executables = deps.get('exe', None)
moduledeps = deps.get('modules', None)
viewers = deps.get('disable_viewers', None)
pyglet = deps.get('pyglet', None)
# print deps
if executables is not None:
for ex in executables:
found = find_executable(ex)
if found is None:
return False
if moduledeps is not None:
for extmod in moduledeps:
if extmod == 'matplotlib':
matplotlib = import_module(
'matplotlib',
__import__kwargs={'fromlist':
['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib is not None:
pass
else:
return False
else:
# TODO min version support
mod = import_module(extmod)
if mod is not None:
version = "unknown"
if hasattr(mod, '__version__'):
version = mod.__version__
else:
return False
if viewers is not None:
import tempfile
tempdir = tempfile.mkdtemp()
os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
if PY3:
vw = '#!/usr/bin/env python3\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
else:
vw = '#!/usr/bin/env python\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
for viewer in viewers:
with open(os.path.join(tempdir, viewer), 'w') as fh:
fh.write(vw)
# make the file executable
os.chmod(os.path.join(tempdir, viewer),
stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
if pyglet:
# monkey-patch pyglet s.t. it does not open a window during
# doctesting
import pyglet
class DummyWindow(object):
def __init__(self, *args, **kwargs):
self.has_exit=True
self.width = 600
self.height = 400
def set_vsync(self, x):
pass
def switch_to(self):
pass
def push_handlers(self, x):
pass
def close(self):
pass
pyglet.window.Window = DummyWindow
return True
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
    Modified from doctest's version by looking harder for code in the
    case that it looks like the code comes from a different module.
    In the case of decorated functions (e.g. @vectorize) they appear
    to come from a different module (e.g. multidimensional) even though
    their code is not there.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to ``tests``.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
if not self._recurse:
return
# Look for tests in a module's contained objects.
if inspect.ismodule(obj):
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
                    # Make sure we don't run doctests for functions or classes
# from different modules
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (rawname %s)" % (val, module, rawname)
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module,
source_lines, globs, seen)
except KeyboardInterrupt:
raise
# Look for tests in a module's __test__ dictionary.
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, string_types):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, string_types)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj):
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if (inspect.isfunction(val) or
inspect.isclass(val) or
isinstance(val, property)):
                    # Make sure we don't run doctests for functions or classes
# from different modules
if isinstance(val, property):
if hasattr(val.fget, '__module__'):
if val.fget.__module__ != module.__name__:
continue
else:
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (valname %s)" % (
val, module, valname)
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
lineno = None
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, string_types):
# obj is a string in the case for objects in the polys package.
# Note that source_lines is a binary string (compiled polys
# modules), which can't be handled by _find_lineno so determine
# the line number here.
docstring = obj
matches = re.findall("line \d+", name)
assert len(matches) == 1, \
"string '%s' does not contain lineno " % name
            # NOTE: this is not the exact line number, but it's better than
            # no lineno ;)
lineno = int(matches[0][5:])
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, string_types):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# check that properties have a docstring because _find_lineno
# assumes it
if isinstance(obj, property):
if obj.fget.__doc__ is None:
return None
# Find the docstring's location in the file.
if lineno is None:
# handling of properties is not implemented in _find_lineno so do
# it here
if hasattr(obj, 'func_closure') and obj.func_closure is not None:
tobj = obj.func_closure[0].cell_contents
elif isinstance(obj, property):
tobj = obj.fget
else:
tobj = obj
lineno = self._find_lineno(tobj, source_lines)
if lineno is None:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
if hasattr(obj, '_doctest_depends_on'):
globs['_doctest_depends_on'] = obj._doctest_depends_on
else:
globs['_doctest_depends_on'] = {}
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
"""
A class used to run DocTest test cases, and accumulate statistics.
The ``run`` method is used to process a single DocTest case. It
returns a tuple ``(f, t)``, where ``t`` is the number of test cases
tried, and ``f`` is the number of test cases that failed.
Modified from the doctest version to not reset the sys.displayhook (see
issue 5140).
See the docstring of the original DocTestRunner for more information.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
"""
self.test = test
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
test.globs['print_function'] = print_function
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
DocTestRunner._DocTestRunner__record_outcome
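# Illustrative sketch (not part of SymPy): the three assignments above are
# needed because names starting with two underscores are mangled per defining
# class, so the inherited private helpers are only reachable under their
# DocTestRunner-mangled names.  A minimal, hypothetical demonstration of the
# same mechanism:
def _name_mangling_demo():
    class Base(object):
        def __helper(self):  # stored on Base as _Base__helper
            return "base helper"
    class Child(Base):
        pass
    # Alias the inherited helper under the subclass's mangled name, exactly
    # as done for SymPyDocTestRunner above.
    Child._Child__helper = Base._Base__helper
    return Child()._Child__helper()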
class SymPyOutputChecker(pdoctest.OutputChecker):
"""
Compared to the OutputChecker from the stdlib our OutputChecker class
supports numerical comparison of floats occurring in the output of the
doctest examples
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
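# Illustrative sketch (not part of SymPy): a much-simplified version of the
# numerical comparison performed by SymPyOutputChecker.check_output above.
# It only compares the floats found in the two strings (the real checker also
# substitutes them back and compares the surrounding text); the 1e-5
# tolerance mirrors the one used above.
def _float_tolerant_match(want, got, tol=1e-5):
    float_rgx = re.compile(r'[-+]?(?:\d+\.\d*|\.\d+)')
    want_nums = [float(x) for x in float_rgx.findall(want)]
    got_nums = [float(x) for x in float_rgx.findall(got)]
    if len(want_nums) != len(got_nums):
        return False
    return all(abs(w - g) <= tol for w, g in zip(want_nums, got_nums))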
class Reporter(object):
"""
Parent class for all reporters.
"""
pass
class PyTestReporter(Reporter):
"""
Py.test-like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
# TODO: Should these be protected?
self.slow_test_functions = []
self.fast_test_functions = []
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
def root_dir(self, dir):
self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
if PY3:
stdout = stdout.decode("utf-8")
except (OSError, IOError):
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
try:
width = int(columns)
except ValueError:
# skip this regex if the column count cannot be parsed,
# otherwise ``width`` would be undefined below
continue
if width != 0:
return width
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
def write(self, text, color="", align="left", width=None,
force_colors=False):
"""
Prints a text on the screen.
It uses sys.stdout.write(), so no readline library is necessary.
Parameters
==========
color : choose from the colors below, "" means default color
align : "left"/"right", "left" is a normal print, "right" is aligned on
the right-hand side of the screen, filled with spaces if
necessary
width : the screen width
"""
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
if width is None:
width = self.terminal_width
if align == "right":
if self._write_pos + len(text) > width:
# we don't fit on the current line, create a new line
self.write("\n")
self.write(" "*(width - self._write_pos - len(text)))
if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
sys.stdout.isatty():
# the stdout is not a terminal, this for example happens if the
# output is piped to less, e.g. "bin/test | less". In this case,
# the terminal control sequences would be printed verbatim, so
# don't use any colors.
color = ""
elif sys.platform == "win32":
# Windows consoles don't support ANSI escape sequences
color = ""
elif not self._colors:
color = ""
if self._line_wrap:
if text[0] != "\n":
sys.stdout.write("\n")
# Avoid UnicodeEncodeError when printing out test failures
if PY3 and IS_WINDOWS:
text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
text = text.encode(sys.stdout.encoding, 'backslashreplace'
).decode(sys.stdout.encoding)
if color == "":
sys.stdout.write(text)
else:
sys.stdout.write("%s%s%s" %
(c_color % colors[color], text, c_normal))
sys.stdout.flush()
l = text.rfind("\n")
if l == -1:
self._write_pos += len(text)
else:
self._write_pos = len(text) - l - 1
self._line_wrap = self._write_pos >= width
self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
t = traceback.extract_tb(tb)
# remove the first item, as that is always runtests.py
t = t[1:]
t = traceback.format_list(t)
self.write("".join(t))
t = traceback.format_exception_only(e, val)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from .misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
if seed is not None:
self.write("random seed: %d\n" % seed)
from .misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
def add_text(mytext):
global text, linelen
"""Break new text if too long."""
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if self.slow_test_functions:
self.write_center('slowest tests', '_')
sorted_slow = sorted(self.slow_test_functions, key=lambda r: r[1])
for slow_func_name, taken in sorted_slow:
print('%s - Took %.3f seconds' % (slow_func_name, taken))
if self.fast_test_functions:
self.write_center('unexpectedly fast tests', '_')
sorted_fast = sorted(self.fast_test_functions,
key=lambda r: r[1])
for fast_func_name, taken in sorted_fast:
print('%s - Took %.3f seconds' % (fast_func_name, taken))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
self.write(char, "Blue")
if self._verbose:
self.write(" - ", "Blue")
if v is not None:
self.write(message, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
sympy_dir = get_sympy_dir()
|
bsd-3-clause
|
hsuantien/scikit-learn
|
sklearn/linear_model/ridge.py
|
89
|
39360
|
"""
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
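# Illustrative sketch (not part of scikit-learn): the same conjugate-gradient
# idea as _solve_sparse_cg, reduced to a single target and the primal normal
# equations w = inv(X^T X + alpha*Id) X^T y.  The function name and tolerance
# are arbitrary choices for the sketch.
def _cg_ridge_demo(X, y, alpha, tol=1e-3):
    n_features = X.shape[1]
    def matvec(v):
        # (X^T X + alpha*Id) v without forming the matrix explicitly
        return X.T.dot(X.dot(v)) + alpha * v
    A = sp_linalg.LinearOperator((n_features, n_features), matvec=matvec,
                                 dtype=X.dtype)
    coef, info = sp_linalg.cg(A, X.T.dot(y), tol=tol)
    return coef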
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
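# Illustrative sketch (not part of scikit-learn): lsqr minimizes
# ||X w - y||^2 + damp^2 * ||w||^2, which is why _solve_lsqr passes
# damp = sqrt(alpha).  A minimal single-target call under that assumption:
def _lsqr_ridge_demo(X, y, alpha):
    return sp_linalg.lsqr(X, y, damp=np.sqrt(alpha))[0]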
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
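# Illustrative sketch (not part of scikit-learn): the closed-form solution
# computed by _solve_cholesky, written out with plain numpy for a single
# shared alpha and a 1-d target.
def _cholesky_ridge_demo(X, y, alpha):
    n_features = X.shape[1]
    A = X.T.dot(X) + alpha * np.eye(n_features)
    return np.linalg.solve(A, X.T.dot(y))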
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty: we can solve all the multi-target problems at once.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
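# Illustrative sketch (not part of scikit-learn): with X = U diag(s) V^T, the
# ridge solution shrinks each singular direction by s / (s**2 + alpha); this
# is the scalar version of the filtering done in _solve_svd.  y is assumed
# 1-d and alpha scalar to keep the sketch short.
def _svd_ridge_demo(X, y, alpha):
    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    d = s / (s ** 2 + alpha)
    return Vt.T.dot(d * U.T.dot(y))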
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
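# Illustrative sketch (not part of scikit-learn): scaling the rows of X and y
# by sqrt(sample_weight) turns the weighted objective
# sum_i w_i * (y_i - x_i . coef)**2 into an unweighted one, so any plain
# least-squares solver can be reused afterwards.  Dense, 1-d-target version
# of _rescale_data:
def _rescale_data_demo(X, y, sample_weight):
    sw = np.sqrt(np.asarray(sample_weight, dtype=float))
    return X * sw[:, np.newaxis], y * sw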
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
'cholesky', 'sparse_cg' and 'lsqr' support both dense and sparse data;
'svd' only supports dense input.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
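# Illustrative usage sketch (not part of scikit-learn): a direct call to
# ridge_regression on random data; the alpha value and solver choice below
# are arbitrary.
def _ridge_regression_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    return ridge_regression(X, y, alpha=1.0, solver='cholesky')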
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
'cholesky', 'sparse_cg' and 'lsqr' support both dense and sparse data;
'svd' only supports dense input.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
scipy.sparse.linalg.cg, while 'auto' will choose the most
appropriate solver depending on the matrix X. 'lsqr' uses
the dedicated regularized least-squares routine scipy.sparse.linalg.lsqr.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them.
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
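# Illustrative sketch (not part of scikit-learn): the leave-one-out identity
# used by _RidgeGCV, looe = c / diag(G) with G = inv(K + alpha*Id) and
# c = G y, written naively with a dense inverse for a single alpha and a 1-d
# target.  _RidgeGCV never forms G; it reuses one eigendecomposition of K for
# every alpha instead.
def _loo_errors_demo(K, y, alpha):
    n_samples = K.shape[0]
    G = np.linalg.inv(K + alpha * np.eye(n_samples))
    c = G.dot(y)
    return c / np.diag(G)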
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight' : sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use 'svd' if n_samples > n_features and X is not a sparse
matrix, otherwise use 'eigen'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of the kernel
matrix X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
|
bsd-3-clause
|
nhejazi/scikit-learn
|
sklearn/model_selection/tests/test_search.py
|
3
|
59074
|
"""Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from sklearn.externals.joblib._compat import PY3_OR_LATER
from itertools import chain, product
import pickle
import sys
from types import GeneratorType
import re
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.base import BaseEstimator
from sklearn.base import clone
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import fit_grid_point
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, SGDClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.classes_ = np.unique(Y)
return self
def predict(self, T):
return T.shape[0]
def transform(self, X):
return X + self.foo_param
def inverse_transform(self, X):
return X - self.foo_param
predict_proba = predict
predict_log_proba = predict
decision_function = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
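# Illustrative sketch (not part of the test suite): ParameterGrid expands a
# dict of value lists into the cross-product of settings.  The hypothetical
# helper below builds the same cross-product by hand (iteration order aside),
# which is essentially what the set comparison in test_parameter_grid checks.
def _expanded_grid_demo(param_dict):
    keys = sorted(param_dict)
    return [dict(zip(keys, values))
            for values in product(*(param_dict[k] for k in keys))]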
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
[1, 2, 3])
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
def check_hyperparameter_searcher_with_fit_params(klass, **klass_kwargs):
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])
searcher = klass(clf, {'foo_param': [1, 2, 3]}, cv=2, **klass_kwargs)
# The CheckingClassifier generates an assertion error if
# a parameter is missing or has length != len(X).
assert_raise_message(AssertionError,
"Expected fit parameter(s) ['eggs'] not seen.",
searcher.fit, X, y, spam=np.ones(10))
assert_raise_message(AssertionError,
"Fit parameter spam has length 1; expected 4.",
searcher.fit, X, y, spam=np.ones(1),
eggs=np.zeros(10))
searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
def test_grid_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(GridSearchCV)
def test_random_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(RandomizedSearchCV, n_iter=1)
def test_grid_search_fit_params_deprecation():
# NOTE: Remove this test in v0.21
# Use of `fit_params` in the class constructor is deprecated,
# but will still work until v0.21.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_warns(DeprecationWarning, grid_search.fit, X, y)
def test_grid_search_fit_params_two_places():
# NOTE: Remove this test in v0.21
# If users try to input fit parameters in both
# the constructor (deprecated use) and the `fit`
# method, we'll ignore the values passed to the constructor.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
# The "spam" array is too short and will raise an
# error in the CheckingClassifier if used.
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(1)})
expected_warning = ('Ignoring fit_params passed as a constructor '
'argument in favor of keyword arguments to '
'the "fit" method.')
assert_warns_message(RuntimeWarning, expected_warning,
grid_search.fit, X, y, spam=np.ones(10))
# Verify that `fit` prefers its own kwargs by giving valid
# kwargs in the constructor and invalid in the method call
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_raise_message(AssertionError, "Fit parameter spam has length 1",
grid_search.fit, X, y, spam=np.ones(1))
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = search_no_scoring.score(X, y)
score_accuracy = search_accuracy.score(X, y)
score_no_score_auc = search_no_score_method_auc.score(X, y)
score_auc = search_auc.score(X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
# Check if ValueError (when groups is None) propagates to GridSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
gs.fit, X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_classes__property():
# Test that classes_ property matches best_estimator_.classes_
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
Cs = [.1, 1, 10]
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
grid_search.fit(X, y)
assert_array_equal(grid_search.best_estimator_.classes_,
grid_search.classes_)
# Test that regressors do not have a classes_ attribute
grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute before it's fit
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute without a refit
grid_search = GridSearchCV(LinearSVC(random_state=0),
{'C': Cs}, refit=False)
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
def test_trivial_cv_results_attr():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "cv_results_"))
def test_no_refit():
# Test that GSCV can be used for model selection alone without refitting
clf = MockClassifier()
for scoring in [None, ['accuracy', 'precision']]:
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(not hasattr(grid_search, "best_estimator_") and
hasattr(grid_search, "best_index_") and
hasattr(grid_search, "best_params_"))
# Make sure the functions predict/transform etc raise meaningful
# error messages
for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
'transform', 'inverse_transform'):
assert_raise_message(NotFittedError,
('refit=False. %s is available only after '
'refitting on the best parameters'
% fn_name), getattr(grid_search, fn_name), X)
# Test that an invalid refit param raises appropriate error messages
for refit in ["", 5, True, 'recall', 'accuracy']:
assert_raise_message(ValueError, "For multi-metric scoring, the "
"parameter refit must be set to a scorer key",
GridSearchCV(clf, {}, refit=refit,
scoring={'acc': 'accuracy',
'prec': 'precision'}).fit,
X, y)
def test_grid_search_error():
# Test that grid search will capture errors on data with different length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = None
if PY3_OR_LATER:
grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)})
else:
grid_search = GridSearchCV(clf, {'foo_param': xrange(1, 4)})
grid_search.fit(X, y)
assert_equal(grid_search.best_estimator_.foo_param, 2)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a non-empty sequence.",
GridSearchCV, clf, param_dict)
param_dict = {"C": "1,2,3"}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Check that GridSearchCV handles n-dimensional X and y
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
@ignore_warnings
def test_pandas_input():
# check GridSearchCV doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
def check_df(x):
return isinstance(x, InputFeatureType)
def check_series(x):
return isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "cv_results_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
# Multi-metric evaluation unsupervised
scoring = ['adjusted_rand_score', 'fowlkes_mallows_score']
for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']:
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring=scoring, refit=refit)
grid_search.fit(X, y)
# Both ARI and FMS can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Single metric evaluation unsupervised
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='fowlkes_mallows_score')
grid_search.fit(X, y)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def check_cv_results_array_types(search, param_keys, score_keys):
# Check if the search `cv_results`'s array are of correct types
cv_results = search.cv_results_
assert_true(all(isinstance(cv_results[param], np.ma.MaskedArray)
for param in param_keys))
assert_true(all(cv_results[key].dtype == object for key in param_keys))
assert_false(any(isinstance(cv_results[key], np.ma.MaskedArray)
for key in score_keys))
assert_true(all(cv_results[key].dtype == np.float64
for key in score_keys if not key.startswith('rank')))
scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score']
for key in scorer_keys:
assert_true(cv_results['rank_test_%s' % key].dtype == np.int32)
def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
# Test the search.cv_results_ contains all the required results
assert_array_equal(sorted(cv_results.keys()),
sorted(param_keys + score_keys + ('params',)))
assert_true(all(cv_results[key].shape == (n_cand,)
for key in param_keys + score_keys))
def check_cv_results_grid_scores_consistency(search):
# TODO Remove test in 0.20
if search.multimetric_:
assert_raise_message(AttributeError, "not available for multi-metric",
getattr, search, 'grid_scores_')
else:
cv_results = search.cv_results_
res_scores = np.vstack(list([cv_results["split%d_test_score" % i]
for i in range(search.n_splits_)])).T
res_means = cv_results["mean_test_score"]
res_params = cv_results["params"]
n_cand = len(res_params)
grid_scores = assert_warns(DeprecationWarning, getattr,
search, 'grid_scores_')
assert_equal(len(grid_scores), n_cand)
# Check consistency of the structure of grid_scores
for i in range(n_cand):
assert_equal(grid_scores[i].parameters, res_params[i])
assert_array_equal(grid_scores[i].cv_validation_scores,
res_scores[i, :])
assert_array_equal(grid_scores[i].mean_validation_score,
res_means[i])
def test_grid_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4,
random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
for iid in (False, True):
search = GridSearchCV(SVC(), cv=n_splits, iid=iid, param_grid=params)
search.fit(X, y)
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check if score and timing are reasonable
assert_true(all(cv_results['rank_test_score'] >= 1))
assert_true(all(np.all(cv_results[k] >= 0) for k in score_keys
                if k != 'rank_test_score'))
assert_true(all(np.all(cv_results[k] <= 1) for k in score_keys
                if 'time' not in k and
                k != 'rank_test_score'))
# Check cv_results structure
check_cv_results_array_types(search, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
# Check masking
cv_results = search.cv_results_
n_candidates = len(search.cv_results_['params'])
assert_true(all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'linear'))
assert_true(all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf'))
check_cv_results_grid_scores_consistency(search)
def test_random_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
n_search_iter = 30
params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
param_keys = ('param_C', 'param_gamma')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_cand = n_search_iter
for iid in (False, True):
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_splits,
iid=iid, param_distributions=params)
search.fit(X, y)
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check results structure
check_cv_results_array_types(search, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
# For random_search, all the param array vals should be unmasked
assert_false(any(cv_results['param_C'].mask) or
any(cv_results['param_gamma'].mask))
check_cv_results_grid_scores_consistency(search)
def test_search_iid_param():
# Test the IID parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(SVC(), param_grid={'C': [1, 10]}, cv=cv)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv)
for search in (grid_search, random_search):
search.fit(X, y)
assert_true(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s_i][0]
for s_i in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s_i][0]
for s_i in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
# Test the first candidate
assert_equal(search.cv_results_['param_C'][0], 1)
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
assert_array_almost_equal(train_cv_scores, [1, 1])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average and weighted std
expected_test_mean = 1 * 1. / 4. + 1. / 3. * 3. / 4.
expected_test_std = np.sqrt(1. / 4 * (expected_test_mean - 1) ** 2 +
3. / 4 * (expected_test_mean - 1. / 3.) **
2)
assert_almost_equal(test_mean, expected_test_mean)
assert_almost_equal(test_std, expected_test_std)
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
# once with iid=False
grid_search = GridSearchCV(SVC(),
param_grid={'C': [1, 10]},
cv=cv, iid=False)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv, iid=False)
for search in (grid_search, random_search):
search.fit(X, y)
assert_false(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s][0]
for s in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s][0]
for s in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
assert_equal(search.cv_results_['param_C'][0], 1)
# scores are the same as above
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
# Unweighted mean/std is used
assert_almost_equal(test_mean, np.mean(test_cv_scores))
assert_almost_equal(test_std, np.std(test_cv_scores))
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
def test_grid_search_cv_results_multimetric():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
for iid in (False, True):
grid_searches = []
for scoring in ({'accuracy': make_scorer(accuracy_score),
'recall': make_scorer(recall_score)},
'accuracy', 'recall'):
grid_search = GridSearchCV(SVC(), cv=n_splits, iid=iid,
param_grid=params, scoring=scoring,
refit=False)
grid_search.fit(X, y)
assert_equal(grid_search.iid, iid)
grid_searches.append(grid_search)
compare_cv_results_multimetric_with_single(*grid_searches, iid=iid)
def test_random_search_cv_results_multimetric():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
n_search_iter = 30
scoring = ('accuracy', 'recall')
# Scipy 0.12's stats dists do not accept seed, hence we use param grid
params = dict(C=np.logspace(-10, 1), gamma=np.logspace(-5, 0, base=0.1))
for iid in (True, False):
for refit in (True, False):
random_searches = []
for scoring in (('accuracy', 'recall'), 'accuracy', 'recall'):
# If True, for multi-metric pass refit='accuracy'
if refit:
refit = 'accuracy' if isinstance(scoring, tuple) else refit
clf = SVC(probability=True, random_state=42)
random_search = RandomizedSearchCV(clf, n_iter=n_search_iter,
cv=n_splits, iid=iid,
param_distributions=params,
scoring=scoring,
refit=refit, random_state=0)
random_search.fit(X, y)
random_searches.append(random_search)
compare_cv_results_multimetric_with_single(*random_searches,
iid=iid)
if refit:
compare_refit_methods_when_refit_with_acc(
random_searches[0], random_searches[1], refit)
def compare_cv_results_multimetric_with_single(
search_multi, search_acc, search_rec, iid):
"""Compare multi-metric cv_results with the ensemble of multiple
single metric cv_results from single metric grid/random search"""
assert_equal(search_multi.iid, iid)
assert_true(search_multi.multimetric_)
assert_array_equal(sorted(search_multi.scorer_),
('accuracy', 'recall'))
cv_results_multi = search_multi.cv_results_
cv_results_acc_rec = {re.sub('_score$', '_accuracy', k): v
for k, v in search_acc.cv_results_.items()}
cv_results_acc_rec.update({re.sub('_score$', '_recall', k): v
for k, v in search_rec.cv_results_.items()})
# Check if score and timing are reasonable, also checks if the keys
# are present
assert_true(all((np.all(cv_results_multi[k] <= 1) for k in (
'mean_score_time', 'std_score_time', 'mean_fit_time',
'std_fit_time'))))
# Compare the keys, other than time keys, among multi-metric and
# single metric grid search results. np.testing.assert_equal performs a
# deep nested comparison of the two cv_results dicts
np.testing.assert_equal({k: v for k, v in cv_results_multi.items()
if not k.endswith('_time')},
{k: v for k, v in cv_results_acc_rec.items()
if not k.endswith('_time')})
def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit):
"""Compare refit multi-metric search methods with single metric methods"""
if refit:
assert_equal(search_multi.refit, 'accuracy')
else:
assert_false(search_multi.refit)
assert_equal(search_acc.refit, refit)
X, y = make_blobs(n_samples=100, n_features=4, random_state=42)
for method in ('predict', 'predict_proba', 'predict_log_proba'):
assert_almost_equal(getattr(search_multi, method)(X),
getattr(search_acc, method)(X))
assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y))
for key in ('best_index_', 'best_score_', 'best_params_'):
assert_equal(getattr(search_multi, key), getattr(search_acc, key))
def test_search_cv_results_rank_tie_breaking():
X, y = make_blobs(n_samples=50, random_state=42)
# The two C values are close enough to give similar models
# which would result in a tie of their mean cv-scores
param_grid = {'C': [1, 1.001, 0.001]}
grid_search = GridSearchCV(SVC(), param_grid=param_grid)
random_search = RandomizedSearchCV(SVC(), n_iter=3,
param_distributions=param_grid)
for search in (grid_search, random_search):
search.fit(X, y)
cv_results = search.cv_results_
# Check tie breaking strategy -
# Check that there is a tie in the mean scores between
# candidates 1 and 2 alone
assert_almost_equal(cv_results['mean_test_score'][0],
cv_results['mean_test_score'][1])
assert_almost_equal(cv_results['mean_train_score'][0],
cv_results['mean_train_score'][1])
assert_false(np.allclose(cv_results['mean_test_score'][1],
cv_results['mean_test_score'][2]))
assert_false(np.allclose(cv_results['mean_train_score'][1],
cv_results['mean_train_score'][2]))
# 'min' rank should be assigned to the tied candidates
assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])
def test_search_cv_results_none_param():
X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]
estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())
est_parameters = {"random_state": [0, None]}
cv = KFold(random_state=0)
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv).fit(X, y)
assert_array_equal(grid_search.cv_results_['param_random_state'],
[0, None])
@ignore_warnings()
def test_search_cv_timing():
svc = LinearSVC(random_state=0)
X = [[1, ], [2, ], [3, ], [4, ]]
y = [0, 1, 1, 0]
gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)
rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)
for search in (gs, rs):
search.fit(X, y)
for key in ['mean_fit_time', 'std_fit_time']:
# NOTE The precision of time.time on Windows is not high
# enough for the fit/score times to be non-zero for trivial X and y
assert_true(np.all(search.cv_results_[key] >= 0))
assert_true(np.all(search.cv_results_[key] < 1))
for key in ['mean_score_time', 'std_score_time']:
assert_true(search.cv_results_[key][1] >= 0)
assert_true(search.cv_results_[key][0] == 0.0)
assert_true(np.all(search.cv_results_[key] < 1))
def test_grid_search_correct_score_results():
# test that correct scores are used
n_splits = 3
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)
cv_results = grid_search.fit(X, y).cv_results_
# Test scorer names
result_keys = list(cv_results.keys())
expected_keys = (("mean_test_score", "rank_test_score") +
tuple("split%d_test_score" % cv_i
for cv_i in range(n_splits)))
assert_true(all(np.in1d(expected_keys, result_keys)))
cv = StratifiedKFold(n_splits=n_splits)
n_splits = grid_search.n_splits_
for candidate_i, C in enumerate(Cs):
clf.set_params(C=C)
cv_scores = np.array(
list(grid_search.cv_results_['split%d_test_score'
% s][candidate_i]
for s in range(n_splits)))
for i, (train, test) in enumerate(cv.split(X, y)):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, cv_scores[i])
def test_fit_grid_point():
X, y = make_classification(random_state=0)
cv = StratifiedKFold(random_state=0)
svc = LinearSVC(random_state=0)
scorer = make_scorer(accuracy_score)
for params in ({'C': 0.1}, {'C': 0.01}, {'C': 0.001}):
for train, test in cv.split(X, y):
this_scores, this_params, n_test_samples = fit_grid_point(
X, y, clone(svc), params, train, test,
scorer, verbose=False)
est = clone(svc).set_params(**params)
est.fit(X[train], y[train])
expected_score = scorer(est, X[test], y[test])
# Test the return values of fit_grid_point
assert_almost_equal(this_scores, expected_score)
assert_equal(params, this_params)
assert_equal(n_test_samples, test.size)
# Should raise an error upon multimetric scorer
assert_raise_message(ValueError, "scoring value should either be a "
"callable, string or None.", fit_grid_point, X, y,
svc, params, train, test, {'score': scorer},
verbose=True)
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
grid_search_pickled = pickle.loads(pickle.dumps(grid_search))
assert_array_almost_equal(grid_search.predict(X),
grid_search_pickled.predict(X))
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
random_search_pickled = pickle.loads(pickle.dumps(random_search))
assert_array_almost_equal(random_search.predict(X),
random_search_pickled.predict(X))
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
res_params = grid_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
grid_search.cv_results_['split%d_test_score' % i][cand_i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
res_params = random_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
random_search.cv_results_['split%d_test_score'
% i][cand_i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
def get_cand_scores(i):
return np.array(list(gs.cv_results_['split%d_test_score' % s][i]
for s in range(gs.n_splits_)))
assert all((np.all(get_cand_scores(cand_i) == 0.0)
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER))
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
assert all(np.all(np.isnan(get_cand_scores(cand_i)))
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
def test_stochastic_gradient_loss_param():
# Make sure the predict_proba works when loss is specified
# as one of the parameters in the param_grid.
param_grid = {
'loss': ['log'],
}
X = np.arange(24).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = GridSearchCV(estimator=SGDClassifier(tol=1e-3, loss='hinge'),
param_grid=param_grid)
# When the estimator is not fitted, `predict_proba` is not available as the
# loss is 'hinge'.
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
clf.predict_proba(X)
clf.predict_log_proba(X)
# Make sure `predict_proba` is not available when setting loss=['hinge']
# in param_grid
param_grid = {
'loss': ['hinge'],
}
clf = GridSearchCV(estimator=SGDClassifier(tol=1e-3, loss='hinge'),
param_grid=param_grid)
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
assert_false(hasattr(clf, "predict_proba"))
def test_search_train_scores_set_to_false():
X = np.arange(6).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = LinearSVC(random_state=0)
gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]},
return_train_score=False)
gs.fit(X, y)
def test_grid_search_cv_splits_consistency():
# Check if a one time iterable is accepted as a cv parameter.
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=n_samples, random_state=0)
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
gs.fit(X, y)
gs2 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits))
gs2.fit(X, y)
# Give generator as a cv parameter
assert_true(isinstance(KFold(n_splits=n_splits,
shuffle=True, random_state=0).split(X, y),
GeneratorType))
gs3 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits, shuffle=True,
random_state=0).split(X, y))
gs3.fit(X, y)
gs4 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits, shuffle=True,
random_state=0))
gs4.fit(X, y)
def _pop_time_keys(cv_results):
for key in ('mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time'):
cv_results.pop(key)
return cv_results
# Check if generators are supported as cv and
# that the splits are consistent
np.testing.assert_equal(_pop_time_keys(gs3.cv_results_),
_pop_time_keys(gs4.cv_results_))
# OneTimeSplitter is a non-re-entrant cv where split can be called only
# once. If ``cv.split`` were called once per param setting in
# GridSearchCV.fit, the 2nd and 3rd parameters would not be evaluated, as
# no train/test indices would be generated for the 2nd and subsequent
# cv.split calls.
# This is a check to make sure cv.split is not called once per param
# setting.
np.testing.assert_equal({k: v for k, v in gs.cv_results_.items()
if not k.endswith('_time')},
{k: v for k, v in gs2.cv_results_.items()
if not k.endswith('_time')})
# Check consistency of folds across the parameters
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.1, 0.2, 0.2]},
cv=KFold(n_splits=n_splits, shuffle=True))
gs.fit(X, y)
# As the first two param settings (C=0.1) and the next two param
# settings (C=0.2) are the same, the test and train scores must also be
# the same as long as the same train/test indices are generated for all
# the cv splits, for both param settings
for score_type in ('train', 'test'):
per_param_scores = {}
for param_i in range(4):
per_param_scores[param_i] = list(
gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]
for s in range(5))
assert_array_almost_equal(per_param_scores[0],
per_param_scores[1])
assert_array_almost_equal(per_param_scores[2],
per_param_scores[3])
def test_transform_inverse_transform_round_trip():
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
grid_search.fit(X, y)
X_round_trip = grid_search.inverse_transform(grid_search.transform(X))
assert_array_equal(X, X_round_trip)
|
bsd-3-clause
|
SciBase-Project/internationality-journals
|
src/aminer_graph_community.py
|
3
|
2415
|
print "[INFO] Reading aminer_cites.json"
# nodes belonging to each publication
nodes = {}
# self cited edges
edge_list_1 = []
# non self cited edges
edge_list_2 = []
# publication edges
edge_list_3 = []
import json
with open('../output/aminer_cites.json') as data_file:
data = json.load(data_file)
for publication in data :
papers = data[publication]
for paper in papers :
# add edge to publication
src = paper
edge_list_3.append((publication, src))
# add node to respective publication
if publication not in nodes :
nodes[publication] = []
nodes[publication].append(paper)
cites = data[publication][paper]
for cite in cites :
src = paper
dest = cite['index']
# add node to respective publication
cite_pub = cite['publication']
if cite_pub not in nodes :
nodes[cite_pub] = []
nodes[cite_pub].append(dest)
# add edges
edge = (src, dest)
# self cited edge
if cite['self'] == True : edge_list_1.append(edge)
# non self cited edge
else : edge_list_2.append(edge)
# add edge to publication
edge_list_3.append((cite_pub, dest))
# remove all duplicates
edge_list_3 = list(set(edge_list_3))
# remove all duplicates
for pub in nodes :
nodes[pub] = list(set(nodes[pub]))
print "[INFO] Done reading"
print "[INFO] Generating graph"
import networkx as nx
import matplotlib.pyplot as plt
# make a new graph
G = nx.Graph()
all_edges = []
all_edges.extend(edge_list_1)
all_edges.extend(edge_list_2)
all_edges.extend(edge_list_3)
G.add_edges_from(all_edges)
# positions for all nodes
pos = nx.spring_layout(G)
# draw set of nodes
for pub in nodes :
from random import random
nx.draw_networkx_nodes(G, pos, nodelist=nodes[pub], node_size=15, node_color=(random(), random(), random()))
# draw set of edges from self cited list
nx.draw_networkx_edges(G,pos, edgelist=edge_list_1, width=1, edge_color='r')
# draw set of edges from non self cited list
nx.draw_networkx_edges(G,pos, edgelist=edge_list_2, width=1, edge_color='g')
# draw set of edges to publications
# nx.draw_networkx_edges(G,pos, edgelist=edge_list_3, width=1, edge_color='k')
plt.show() # display
print "[INFO] Done generating graph"
|
mit
|
weegreenblobbie/nsound
|
src/examples/make_oscilloscope_video.py
|
1
|
7428
|
"""
$Id: make_oscilloscope_video.py 877 2014-11-13 03:51:39Z weegreenblobbie $
Nsound is a C++ library and Python module for audio synthesis featuring
dynamic digital filters. Nsound lets you easily shape waveforms and write
to disk or plot them. Nsound aims to be as powerful as Csound but easy to
use.
Copyright (c) 2004 to Present Nick Hilton
weegreenblobbie_at_yahoo_com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
import argparse
import glob
import os
import sys
from matplotlib import pylab
import Nsound as ns
import numpy as np
#------------------------------------------------------------------------------
# Globals
png_filename = 'oscilloscope_%010d.png'
#------------------------------------------------------------------------------
# utility functions
def get_ydata(sr, signal, t, width):
'''
Samples the signal at pos : pos + width, computes the location of the
peak, and then re-windows the signal so that the peak is always located
at the same position in the returned slice.
'''
pos = int(t * sr + 0.5)
s0 = pos
s1 = s0 + width
if s1 > len(signal):
s1 = len(signal) - 1
s0 = s1 - width
datay = np.array(signal[s0 : s1])
# get location of the peak
peak_idx = datay.argmax()
# Center the peak at 33% of the window
global_index = pos + peak_idx
s0 = int(global_index - 0.33333 * width)
s1 = s0 + width
if s0 >= len(signal):
return np.zeros(width)
if s1 >= len(signal):
#~ datay = signal[s0:]
#~ n = width - len(datay)
#~ datay = np.concatenate([datay, np.zeros(n)])
#~ return datay
return np.zeros(width)
return signal[s0 : s1]
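# Illustrative numbers for the re-windowing above (a sketch added for clarity,
# not part of the original script; the values are made up): with sr = 44100 Hz
# and the default width of 0.05 s (2205 samples), a peak found 500 samples into
# the raw window gives s0 = int(pos + 500 - 0.33333 * 2205) = pos - 235, so the
# peak lands 735 samples (about one third) into the returned slice.  That keeps
# the displayed trace "triggered" on the waveform peak from frame to frame.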
def main():
parser = argparse.ArgumentParser(description = 'oscilloscope')
parser.add_argument(
'-c',
'--channel',
dest = 'channel',
default = 0,
type = int,
help = "Input channel to use if n_channels > 1")
parser.add_argument(
'--fps',
dest = 'fps',
default = 30,
help = "Frames-per-second to render")
parser.add_argument(
'--freq',
dest = 'show_freq',
default = False,
action = 'store_true',
help = "Display fundamental frequency")
parser.add_argument(
'--pk-pk',
dest = 'show_pk2pk',
default = False,
action = 'store_true',
help = "Display peak 2 peak")
parser.add_argument(
'-w',
'--width',
dest = 'width',
default = 0.05,
type = float,
help = "Oscillocope's width in seconds")
parser.add_argument(
'filename',
help = "Input .wav file to process")
args = parser.parse_args()
assert args.width > 0
assert args.fps > 0
print "Hello Oscillocope!"
#--------------------------------------------------------------------------
# remove existing .png files
pattern = png_filename.replace('%010d', '*')
files = glob.glob(pattern)
for f in files:
os.remove(f)
#--------------------------------------------------------------------------
# read input file
sys.stdout.write("Reading '%s' ... " % args.filename)
sys.stdout.flush()
audio_stream = ns.AudioStream(args.filename)
sr = audio_stream.getSampleRate()
data = audio_stream[args.channel]
print " done"
#--------------------------------------------------------------------------
# compute width and step
width = int(sr * args.width + 0.5)
step = 1.0 / args.fps
n_frames = audio_stream.getDuration() / step
print "fps = ", args.fps
print "sr = ", sr
print "width = ", width
print "step = ", step
print "n_frames = ", n_frames
#--------------------------------------------------------------------------
# Create plot
fig = pylab.figure()
pylab.grid(True)
pylab.xlabel('Time')
pylab.ylabel('Amplitude')
ax = fig.add_subplot(111)
#--------------------------------------------------------------------------
# Create xdata
xdata = np.arange(0, width) / sr
#--------------------------------------------------------------------------
# Process the data
if len(data) < width:
# pad with zeros
n = width - len(data)
for i in xrange(n):
data << 0
# frame 0
t = 0.5 * step
pos = int(t * sr + 0.5)
ydata = data[pos : pos + width]
pylab.axis([xdata[0], xdata[-1], -1, 1])
line, = ax.plot(xdata, ydata)
# Label with time
time_text = ax.text(
0.98, 0.98,
'%3.2f sec' % t,
horizontalalignment ='right',
verticalalignment = 'top',
transform = ax.transAxes)
#--------------------------------------------------------------------------
# Main loop
update_display = round(n_frames / 200)
if update_display <= 0:
update_display = 1
print "update_display = ", update_display
dur = audio_stream.getDuration()
n = 0
while t < dur:
if n % update_display == 0:
pdone = 100.0 * n / n_frames
sys.stdout.write(" \rProcessing: %6.2f%%" % pdone)
sys.stdout.flush()
ydata = get_ydata(sr, data, t, width)
time_text.set_text('%3.2f sec' % t)
line.set_ydata(ydata)
fig.canvas.draw()
pylab.savefig(png_filename % n)
n += 1
t += step
pdone = 100.0
sys.stdout.write(" \rProcessing: %6.2f%%" % pdone)
print ""
#--------------------------------------------------------------------------
# encode audio + video
# Pass 1
cmd = (
'ffmpeg -r {fps} -i {pngfiles} -r {sr} -i {audiofile} -pass 1 -vcodec libtheora '
'-b:v 6000k -bt 6000k -pix_fmt yuv444p -r {fps} -f rawvideo -y /dev/null')
cmd = cmd.format(
audiofile = args.filename,
fps = args.fps,
pngfiles = png_filename,
sr = sr,
)
os.system(cmd)
# Pass 2
cmd = (
'ffmpeg -r {fps} -i {pngfiles} -r {sr} -i {audiofile} -pass 2 -vcodec libtheora '
'-b:v 6000k -bt 6000k -pix_fmt yuv444p -r {fps} movie.ogv -y'
)
cmd = cmd.format(
audiofile = args.filename,
fps = args.fps,
pngfiles = png_filename,
sr = sr,
)
os.system(cmd)
print "Goodbye!"
if __name__ == "__main__":
main()
|
gpl-2.0
|
quheng/scikit-learn
|
sklearn/utils/tests/test_estimator_checks.py
|
202
|
3757
|
import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
|
bsd-3-clause
|
zaxliu/scipy
|
scipy/signal/fir_filter_design.py
|
40
|
20637
|
"""Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez']
# Some notes on function parameters:
#
# `cutoff` and `width` are given as a numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2 kHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
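#
# As a concrete illustration of the two conventions above (added for clarity,
# with made-up numbers; not part of the original module):
#
#     nyquist_hz = 2000.0                 # Nyquist rate of 2 kHz
#     width = 300.0 / nyquist_hz          # -> 0.15, relative transition width
#     numtaps = 101                       # number of taps / coefficients
#     order = numtaps - 1                 # -> 100, the filter order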
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
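# Worked example for the piecewise formula above (illustrative only, not part
# of the original source): a requested attenuation of a = 65 dB falls in the
# first branch, giving beta = 0.1102 * (65 - 8.7) = 0.1102 * 56.3, i.e. about
# 6.204.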
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""
Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
ripple in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta : float
The beta parameter for the kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=0)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
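# Minimal usage sketch tying kaiser_beta/kaiser_atten/kaiserord to firwin
# (defined below).  This is an illustration added for clarity, not part of the
# original module; the ripple, width and cutoff values are arbitrary.  It is
# kept in a comment so that importing this module stays side-effect free:
#
#     from scipy.signal import kaiserord, firwin
#
#     ripple_db = 60.0              # at least 60 dB of stopband attenuation
#     width = 0.05                  # transition width, as a fraction of Nyquist
#     numtaps, beta = kaiserord(ripple_db, width)
#     taps = firwin(numtaps, 0.25, window=('kaiser', beta))  # low-pass at 0.25*Nyquist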
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None, optional
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values, optional
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool, optional
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool, optional
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e. the filter is a single band highpass filter);
center of first passband otherwise
nyq : float, optional
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See also
--------
scipy.signal.firwin2
Examples
--------
Low-pass from 0 to f:
>>> from scipy import signal
>>> numtaps = 3
>>> f = 0.1
>>> signal.firwin(numtaps, f)
array([ 0.06799017, 0.86401967, 0.06799017])
Use a specific window function:
>>> signal.firwin(numtaps, f, window='nuttall')
array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])
High-pass ('stop' from 0 to f):
>>> signal.firwin(numtaps, f, pass_zero=False)
array([-0.00859313, 0.98281375, -0.00859313])
Band-pass:
>>> f1, f2 = 0.1, 0.2
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
array([ 0.06301614, 0.88770441, 0.06301614])
Band-stop:
>>> signal.firwin(numtaps, [f1, f2])
array([-0.00801395, 1.0160279 , -0.00801395])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
>>> f3, f4 = 0.3, 0.4
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
array([-0.01376344, 1.02752689, -0.01376344])
Multi-band (passbands are [f1, f2] and [f3,f4]):
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
array([ 0.04890915, 0.91284326, 0.04890915])
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
    # is even, and each pair in cutoff corresponds to a passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0,
antisymmetric=False):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
        (e.g. 129, 257, etc.). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float, optional
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
antisymmetric : bool, optional
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
    the value of `numtaps` and the `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
    The magnitude response of all but type I filters is subject to the
    following constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the "
"Nyquist rate.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero "
"and Nyquist rates.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
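# A minimal sketch (illustrative only, not part of the original SciPy source)
# of the antisymmetric branch described in the Notes above: a type III design
# (odd `numtaps`, antisymmetric=True) must have zero gain at both 0 and `nyq`,
# so the example gain vector starts and ends at 0. The frequency grid and the
# 75-tap length are arbitrary example values.
def _example_firwin2_type3():
    freq = [0.0, 0.3, 0.7, 1.0]
    gain = [0.0, 1.0, 1.0, 0.0]  # zero at DC and Nyquist, as type III requires
    return firwin2(75, freq, gain, antisymmetric=True)  # odd numtaps -> type III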
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> from scipy import signal
>>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
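# A minimal sketch (illustrative only, not part of the original SciPy source)
# of the 'hilbert' filter type described above: with an odd number of taps this
# yields an odd-symmetric (type III) design. The 0.05-0.45 band edges (with the
# default Hz=1) and the 71-tap length are arbitrary example values for a
# wideband Hilbert transformer.
def _example_remez_hilbert():
    return remez(71, [0.05, 0.45], [1], type='hilbert')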
|
bsd-3-clause
|
olologin/scikit-learn
|
sklearn/neural_network/rbm.py
|
46
|
12303
|
"""Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
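# A minimal usage sketch (illustrative only, not part of the original
# scikit-learn module): incremental training with partial_fit on mini-batches,
# followed by one Gibbs step and the pseudo-likelihood proxy. The random binary
# data and all hyperparameter values are arbitrary example choices.
def _example_bernoulli_rbm():
    rng = np.random.RandomState(0)
    X = (rng.rand(100, 16) > 0.5).astype(np.float64)  # binary visible units
    rbm = BernoulliRBM(n_components=8, learning_rate=0.05, batch_size=10,
                       n_iter=5, random_state=0)
    for start in range(0, X.shape[0], 10):            # feed one mini-batch at a time
        rbm.partial_fit(X[start:start + 10])
    v_new = rbm.gibbs(X[:5])                          # one Gibbs sampling step
    return v_new, rbm.score_samples(X).mean()         # mean pseudo-likelihood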
|
bsd-3-clause
|
karuppayya/zeppelin
|
python/src/main/resources/grpc/python/ipython_client.py
|
27
|
1457
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import ipython_pb2
import ipython_pb2_grpc
def run():
channel = grpc.insecure_channel('localhost:50053')
stub = ipython_pb2_grpc.IPythonStub(channel)
response = stub.execute(ipython_pb2.ExecuteRequest(code="import time\nfor i in range(1,4):\n\ttime.sleep(1)\n\tprint(i)\n" +
"%matplotlib inline\nimport matplotlib.pyplot as plt\ndata=[1,1,2,3,4]\nplt.figure()\nplt.plot(data)"))
for r in response:
print("output:" + r.output)
response = stub.execute(ipython_pb2.ExecuteRequest(code="range?"))
for r in response:
print(r)
if __name__ == '__main__':
run()
|
apache-2.0
|
larsmans/scikit-learn
|
examples/neighbors/plot_regression.py
|
349
|
1402
|
"""
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
|
bsd-3-clause
|
numenta-archive/htmresearch
|
projects/vehicle-control/agent/run_q.py
|
12
|
5498
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import defaultdict
import operator
import time
import numpy
from unity_client.server import Server
from sensorimotor.encoders.one_d_depth import OneDDepthEncoder
from sensorimotor.q_learner import QLearner
ACTIONS = ["-1", "0", "1"]
class Agent(object):
  def __init__(self, positions):
self.encoder = OneDDepthEncoder(positions=positions,
radius=5,
wrapAround=True,
nPerPosition=28,
wPerPosition=3,
minVal=0,
maxVal=1)
self.plotter = Plotter(self.encoder)
    self.learner = QLearner(ACTIONS, n=1008)
self.lastState = None
self.lastAction = None
def sync(self, outputData):
if not ("ForwardsSweepSensor" in outputData and
"steer" in outputData):
print "Warning: Missing data:", outputData
return
if outputData.get("reset"):
print "Reset."
sensor = outputData["ForwardsSweepSensor"]
steer = outputData["steer"]
reward = outputData.get("reward") or 0
encoding = self.encoder.encode(numpy.array(sensor))
if self.lastState is not None:
self.learner.update(self.lastState, str(self.lastAction),
encoding, str(steer), reward)
value = self.learner.value(encoding)
qValues = {}
    for action in ACTIONS:
qValues[action] = self.learner.qValue(encoding, action)
inputData = {}
inputData["qValues"] = qValues
inputData["bestAction"] = self.learner.bestAction(encoding)
self.plotter.update(sensor, encoding, steer, reward, value, qValues)
if outputData.get("reset"):
self.plotter.render()
self.lastState = encoding
self.lastAction = steer
return inputData
class Plotter(object):
def __init__(self, encoder):
self.encoder = encoder
self.sensor = []
self.encoding = []
self.steer = []
self.reward = []
self.value = []
self.qValues = defaultdict(lambda: [])
self.bestAction = []
import matplotlib.pyplot as plt
self.plt = plt
import matplotlib.cm as cm
self.cm = cm
from pylab import rcParams
rcParams.update({'figure.figsize': (6, 9)})
# rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'white'})
def update(self, sensor, encoding, steer, reward, value, qValues):
self.sensor.append(sensor)
self.encoding.append(encoding)
self.steer.append(steer)
self.reward.append(reward)
self.value.append(value)
for key, value in qValues.iteritems():
self.qValues[key].append(value)
bestAction = int(max(qValues.iteritems(), key=operator.itemgetter(1))[0])
self.bestAction.append(bestAction)
def render(self):
self.plt.figure(1)
self.plt.clf()
n = 7
self.plt.subplot(n,1,1)
self._plot(self.steer, "Steer over time")
self.plt.subplot(n,1,2)
self._plot(self.reward, "Reward over time")
self.plt.subplot(n,1,3)
self._plot(self.value, "Value over time")
self.plt.subplot(n,1,4)
shape = len(self.encoder.positions), self.encoder.scalarEncoder.getWidth()
encoding = numpy.array(self.encoding[-1]).reshape(shape).transpose()
self._imshow(encoding, "Encoding at time t")
self.plt.subplot(n,1,5)
data = self.encoding
w = self.encoder.w
overlaps = [sum(a & b) / float(w) for a, b in zip(data[:-1], data[1:])]
self._plot(overlaps, "Encoding overlaps between consecutive times")
    # for i, action in enumerate(ACTIONS):
# self.plt.subplot(n,1,4+i)
# self._plot(self.qValues[action], "Q value: {0}".format(action))
# self.plt.subplot(n,1,7)
# self._plot(self.bestAction, "Best action")
self.plt.draw()
self.plt.savefig("q-{0}.png".format(time.time()))
def _plot(self, data, title):
self.plt.title(title)
self.plt.xlim(0, len(data))
self.plt.plot(range(len(data)), data)
def _imshow(self, data, title):
self.plt.title(title)
self.plt.imshow(data,
cmap=self.cm.Greys,
interpolation="nearest",
aspect='auto',
vmin=0,
vmax=1)
if __name__ == "__main__":
# complete uniform
# positions = [i*20 for i in range(36)]
# forward uniform
positions = [i*10 for i in range(-18, 18)]
agent = Agent(positions)
Server(agent)
|
agpl-3.0
|
danviv/trading-with-python
|
cookbook/workingWithDatesAndTime.py
|
77
|
1551
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 17:45:02 2011
@author: jev
"""
import time
import datetime as dt
from pandas import *
from pandas.core import datetools
# basic functions
print 'Epoch start: %s' % time.asctime(time.gmtime(0))
print 'Seconds from epoch: %.2f' % time.time()
today = dt.date.today()
print type(today)
print 'Today is %s' % today.strftime('%Y.%m.%d')
# parse datetime
d = dt.datetime.strptime('20120803 21:59:59',"%Y%m%d %H:%M:%S")
# time deltas
someDate = dt.date(2011,8,1)
delta = today - someDate
print 'Delta :', delta
# calculate difference in dates
delta = dt.timedelta(days=20)
print 'Today-delta=', today-delta
t = dt.datetime(*time.strptime('3/30/2004',"%m/%d/%Y")[0:5])
# the '*' operator unpacks the tuple, producing the argument list.
print t
# print every 3rd Wednesday of the month
for month in xrange(1,13):
t = dt.date(2013,month,1)+datetools.relativedelta(months=1)
offset = datetools.Week(weekday=4)
    if t.weekday() != 4:
t_new = t+3*offset
else:
t_new = t+2*offset
t_new = t_new-datetools.relativedelta(days=30)
print t_new.strftime("%B, %d %Y (%A)")
#rng = DateRange(t, t+datetools.YearEnd())
#print rng
# create a range of times
start = dt.datetime(2012,8,1)+datetools.relativedelta(hours=9,minutes=30)
end = dt.datetime(2012,8,1)+datetools.relativedelta(hours=22)
rng = date_range(start,end,freq='30min')
for r in rng: print r.strftime("%Y%m%d %H:%M:%S")
|
bsd-3-clause
|
datapythonista/pandas
|
pandas/tests/dtypes/cast/test_promote.py
|
2
|
22013
|
"""
These test the method maybe_promote from core/dtypes/cast.py
"""
import datetime
from decimal import Decimal
import numpy as np
import pytest
from pandas._libs.tslibs import NaT
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
is_complex_dtype,
is_datetime64_dtype,
is_datetime_or_timedelta_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
import pandas as pd
import pandas._testing as tm
@pytest.fixture(
params=[
bool,
"uint8",
"int32",
"uint64",
"float32",
"float64",
"complex64",
"complex128",
"M8[ns]",
"m8[ns]",
str,
bytes,
object,
]
)
def any_numpy_dtype_reduced(request):
"""
Parameterized fixture for numpy dtypes, reduced from any_numpy_dtype.
    * bool
    * 'uint8'
    * 'int32'
    * 'uint64'
    * 'float32'
    * 'float64'
    * 'complex64'
    * 'complex128'
    * 'M8[ns]'
    * 'm8[ns]'
    * str
    * bytes
    * object
"""
return request.param
def _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar=None):
"""
Auxiliary function to unify testing of scalar/array promotion.
Parameters
----------
dtype : dtype
The value to pass on as the first argument to maybe_promote.
fill_value : scalar
The value to pass on as the second argument to maybe_promote as
a scalar.
expected_dtype : dtype
The expected dtype returned by maybe_promote (by design this is the
same regardless of whether fill_value was passed as a scalar or in an
array!).
exp_val_for_scalar : scalar
The expected value for the (potentially upcast) fill_value returned by
maybe_promote.
"""
assert is_scalar(fill_value)
# here, we pass on fill_value as a scalar directly; the expected value
# returned from maybe_promote is fill_value, potentially upcast to the
# returned dtype.
result_dtype, result_fill_value = maybe_promote(dtype, fill_value)
expected_fill_value = exp_val_for_scalar
assert result_dtype == expected_dtype
_assert_match(result_fill_value, expected_fill_value)
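# A minimal sketch (illustrative only, not part of the original pandas test
# module) of what _check_promote exercises: a direct maybe_promote call. A
# fill_value that does not fit the dtype forces an upcast, and the returned
# fill_value comes back in the upcast dtype (here int8 + 128 -> int16).
def _example_maybe_promote():
    dtype, fill_value = maybe_promote(np.dtype("int8"), 128)  # 128 > int8 max
    assert dtype == np.dtype("int16") and fill_value == 128
    return dtype, fill_value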
def _assert_match(result_fill_value, expected_fill_value):
# GH#23982/25425 require the same type in addition to equality/NA-ness
res_type = type(result_fill_value)
ex_type = type(expected_fill_value)
if hasattr(result_fill_value, "dtype"):
# Compare types in a way that is robust to platform-specific
# idiosyncrasies where e.g. sometimes we get "ulonglong" as an alias
# for "uint64" or "intc" as an alias for "int32"
assert result_fill_value.dtype.kind == expected_fill_value.dtype.kind
assert result_fill_value.dtype.itemsize == expected_fill_value.dtype.itemsize
else:
# On some builds, type comparison fails, e.g. np.int32 != np.int32
assert res_type == ex_type or res_type.__name__ == ex_type.__name__
match_value = result_fill_value == expected_fill_value
if match_value is pd.NA:
match_value = False
# Note: type check above ensures that we have the _same_ NA value
# for missing values, None == None (which is checked
# through match_value above), but np.nan != np.nan and pd.NaT != pd.NaT
match_missing = isna(result_fill_value) and isna(expected_fill_value)
assert match_value or match_missing
@pytest.mark.parametrize(
"dtype, fill_value, expected_dtype",
[
# size 8
("int8", 1, "int8"),
("int8", np.iinfo("int8").max + 1, "int16"),
("int8", np.iinfo("int16").max + 1, "int32"),
("int8", np.iinfo("int32").max + 1, "int64"),
("int8", np.iinfo("int64").max + 1, "object"),
("int8", -1, "int8"),
("int8", np.iinfo("int8").min - 1, "int16"),
("int8", np.iinfo("int16").min - 1, "int32"),
("int8", np.iinfo("int32").min - 1, "int64"),
("int8", np.iinfo("int64").min - 1, "object"),
# keep signed-ness as long as possible
("uint8", 1, "uint8"),
("uint8", np.iinfo("int8").max + 1, "uint8"),
("uint8", np.iinfo("uint8").max + 1, "uint16"),
("uint8", np.iinfo("int16").max + 1, "uint16"),
("uint8", np.iinfo("uint16").max + 1, "uint32"),
("uint8", np.iinfo("int32").max + 1, "uint32"),
("uint8", np.iinfo("uint32").max + 1, "uint64"),
("uint8", np.iinfo("int64").max + 1, "uint64"),
("uint8", np.iinfo("uint64").max + 1, "object"),
# max of uint8 cannot be contained in int8
("uint8", -1, "int16"),
("uint8", np.iinfo("int8").min - 1, "int16"),
("uint8", np.iinfo("int16").min - 1, "int32"),
("uint8", np.iinfo("int32").min - 1, "int64"),
("uint8", np.iinfo("int64").min - 1, "object"),
# size 16
("int16", 1, "int16"),
("int16", np.iinfo("int8").max + 1, "int16"),
("int16", np.iinfo("int16").max + 1, "int32"),
("int16", np.iinfo("int32").max + 1, "int64"),
("int16", np.iinfo("int64").max + 1, "object"),
("int16", -1, "int16"),
("int16", np.iinfo("int8").min - 1, "int16"),
("int16", np.iinfo("int16").min - 1, "int32"),
("int16", np.iinfo("int32").min - 1, "int64"),
("int16", np.iinfo("int64").min - 1, "object"),
("uint16", 1, "uint16"),
("uint16", np.iinfo("int8").max + 1, "uint16"),
("uint16", np.iinfo("uint8").max + 1, "uint16"),
("uint16", np.iinfo("int16").max + 1, "uint16"),
("uint16", np.iinfo("uint16").max + 1, "uint32"),
("uint16", np.iinfo("int32").max + 1, "uint32"),
("uint16", np.iinfo("uint32").max + 1, "uint64"),
("uint16", np.iinfo("int64").max + 1, "uint64"),
("uint16", np.iinfo("uint64").max + 1, "object"),
("uint16", -1, "int32"),
("uint16", np.iinfo("int8").min - 1, "int32"),
("uint16", np.iinfo("int16").min - 1, "int32"),
("uint16", np.iinfo("int32").min - 1, "int64"),
("uint16", np.iinfo("int64").min - 1, "object"),
# size 32
("int32", 1, "int32"),
("int32", np.iinfo("int8").max + 1, "int32"),
("int32", np.iinfo("int16").max + 1, "int32"),
("int32", np.iinfo("int32").max + 1, "int64"),
("int32", np.iinfo("int64").max + 1, "object"),
("int32", -1, "int32"),
("int32", np.iinfo("int8").min - 1, "int32"),
("int32", np.iinfo("int16").min - 1, "int32"),
("int32", np.iinfo("int32").min - 1, "int64"),
("int32", np.iinfo("int64").min - 1, "object"),
("uint32", 1, "uint32"),
("uint32", np.iinfo("int8").max + 1, "uint32"),
("uint32", np.iinfo("uint8").max + 1, "uint32"),
("uint32", np.iinfo("int16").max + 1, "uint32"),
("uint32", np.iinfo("uint16").max + 1, "uint32"),
("uint32", np.iinfo("int32").max + 1, "uint32"),
("uint32", np.iinfo("uint32").max + 1, "uint64"),
("uint32", np.iinfo("int64").max + 1, "uint64"),
("uint32", np.iinfo("uint64").max + 1, "object"),
("uint32", -1, "int64"),
("uint32", np.iinfo("int8").min - 1, "int64"),
("uint32", np.iinfo("int16").min - 1, "int64"),
("uint32", np.iinfo("int32").min - 1, "int64"),
("uint32", np.iinfo("int64").min - 1, "object"),
# size 64
("int64", 1, "int64"),
("int64", np.iinfo("int8").max + 1, "int64"),
("int64", np.iinfo("int16").max + 1, "int64"),
("int64", np.iinfo("int32").max + 1, "int64"),
("int64", np.iinfo("int64").max + 1, "object"),
("int64", -1, "int64"),
("int64", np.iinfo("int8").min - 1, "int64"),
("int64", np.iinfo("int16").min - 1, "int64"),
("int64", np.iinfo("int32").min - 1, "int64"),
("int64", np.iinfo("int64").min - 1, "object"),
("uint64", 1, "uint64"),
("uint64", np.iinfo("int8").max + 1, "uint64"),
("uint64", np.iinfo("uint8").max + 1, "uint64"),
("uint64", np.iinfo("int16").max + 1, "uint64"),
("uint64", np.iinfo("uint16").max + 1, "uint64"),
("uint64", np.iinfo("int32").max + 1, "uint64"),
("uint64", np.iinfo("uint32").max + 1, "uint64"),
("uint64", np.iinfo("int64").max + 1, "uint64"),
("uint64", np.iinfo("uint64").max + 1, "object"),
("uint64", -1, "object"),
("uint64", np.iinfo("int8").min - 1, "object"),
("uint64", np.iinfo("int16").min - 1, "object"),
("uint64", np.iinfo("int32").min - 1, "object"),
("uint64", np.iinfo("int64").min - 1, "object"),
],
)
def test_maybe_promote_int_with_int(dtype, fill_value, expected_dtype):
dtype = np.dtype(dtype)
expected_dtype = np.dtype(expected_dtype)
# output is not a generic int, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_int_with_float(any_int_dtype, float_dtype):
dtype = np.dtype(any_int_dtype)
fill_dtype = np.dtype(float_dtype)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling int with float always upcasts to float64
expected_dtype = np.float64
# fill_value can be different float type
exp_val_for_scalar = np.float64(fill_value)
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_float_with_int(float_dtype, any_int_dtype):
dtype = np.dtype(float_dtype)
fill_dtype = np.dtype(any_int_dtype)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling float with int always keeps float dtype
# because: np.finfo('float32').max > np.iinfo('uint64').max
expected_dtype = dtype
# output is not a generic float, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
@pytest.mark.parametrize(
"dtype, fill_value, expected_dtype",
[
# float filled with float
("float32", 1, "float32"),
("float32", np.finfo("float32").max * 1.1, "float64"),
("float64", 1, "float64"),
("float64", np.finfo("float32").max * 1.1, "float64"),
# complex filled with float
("complex64", 1, "complex64"),
("complex64", np.finfo("float32").max * 1.1, "complex128"),
("complex128", 1, "complex128"),
("complex128", np.finfo("float32").max * 1.1, "complex128"),
# float filled with complex
("float32", 1 + 1j, "complex64"),
("float32", np.finfo("float32").max * (1.1 + 1j), "complex128"),
("float64", 1 + 1j, "complex128"),
("float64", np.finfo("float32").max * (1.1 + 1j), "complex128"),
# complex filled with complex
("complex64", 1 + 1j, "complex64"),
("complex64", np.finfo("float32").max * (1.1 + 1j), "complex128"),
("complex128", 1 + 1j, "complex128"),
("complex128", np.finfo("float32").max * (1.1 + 1j), "complex128"),
],
)
def test_maybe_promote_float_with_float(dtype, fill_value, expected_dtype):
dtype = np.dtype(dtype)
expected_dtype = np.dtype(expected_dtype)
# output is not a generic float, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_bool_with_any(any_numpy_dtype_reduced):
dtype = np.dtype(bool)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling bool with anything but bool casts to object
expected_dtype = np.dtype(object) if fill_dtype != bool else fill_dtype
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_with_bool(any_numpy_dtype_reduced):
dtype = np.dtype(any_numpy_dtype_reduced)
fill_value = True
# filling anything but bool with bool casts to object
expected_dtype = np.dtype(object) if dtype != bool else dtype
# output is not a generic bool, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(bytes_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# we never use bytes dtype internally, always promote to object
expected_dtype = np.dtype(np.object_)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype):
dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype
fill_value = b"abc"
# we never use bytes dtype internally, always promote to object
expected_dtype = np.dtype(np.object_)
# output is not a generic bytes, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_datetime64_with_any(datetime64_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(datetime64_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling datetime with anything but datetime casts to object
if is_datetime64_dtype(fill_dtype):
expected_dtype = dtype
        # for datetime dtypes, scalar values get cast via .to_datetime64()
exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64()
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
@pytest.mark.parametrize(
"fill_value",
[
pd.Timestamp("now"),
np.datetime64("now"),
datetime.datetime.now(),
datetime.date.today(),
],
ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"],
)
def test_maybe_promote_any_with_datetime64(
any_numpy_dtype_reduced, datetime64_dtype, fill_value
):
dtype = np.dtype(any_numpy_dtype_reduced)
# filling datetime with anything but datetime casts to object
if is_datetime64_dtype(dtype):
expected_dtype = dtype
        # for datetime dtypes, scalar values get cast via .to_datetime64()
exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64()
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
warn = None
msg = "Using a `date` object for fill_value"
if type(fill_value) is datetime.date and dtype.kind == "M":
# Casting date to dt64 is deprecated
warn = FutureWarning
with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
# stacklevel is chosen to make sense when called from higher-level functions
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
@pytest.mark.parametrize(
"fill_value",
[
pd.Timestamp("now"),
np.datetime64("now"),
datetime.datetime.now(),
datetime.date.today(),
],
ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"],
)
def test_maybe_promote_any_numpy_dtype_with_datetimetz(
any_numpy_dtype_reduced, tz_aware_fixture, fill_value
):
dtype = np.dtype(any_numpy_dtype_reduced)
fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture)
fill_value = pd.Series([fill_value], dtype=fill_dtype)[0]
# filling any numpy dtype with datetimetz casts to object
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_timedelta64_with_any(timedelta64_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(timedelta64_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling timedelta with anything but timedelta casts to object
if is_timedelta64_dtype(fill_dtype):
expected_dtype = dtype
        # for timedelta dtypes, scalar values get cast via .to_timedelta64()
exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
@pytest.mark.parametrize(
"fill_value",
[pd.Timedelta(days=1), np.timedelta64(24, "h"), datetime.timedelta(1)],
ids=["pd.Timedelta", "np.timedelta64", "datetime.timedelta"],
)
def test_maybe_promote_any_with_timedelta64(
any_numpy_dtype_reduced, timedelta64_dtype, fill_value
):
dtype = np.dtype(any_numpy_dtype_reduced)
# filling anything but timedelta with timedelta casts to object
if is_timedelta64_dtype(dtype):
expected_dtype = dtype
        # for timedelta dtypes, scalar values get cast via .to_timedelta64()
exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_string_with_any(string_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(string_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling string with anything casts to object
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_with_string(any_numpy_dtype_reduced, string_dtype):
dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype
fill_value = "abc"
# filling anything with a string casts to object
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_object_with_any(object_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(object_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling object with anything stays object
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_with_object(any_numpy_dtype_reduced, object_dtype):
dtype = np.dtype(any_numpy_dtype_reduced)
# create array of object dtype from a scalar value (i.e. passing
# dtypes.common.is_scalar), which can however not be cast to int/float etc.
fill_value = pd.DateOffset(1)
# filling object with anything stays object
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_numpy_dtype_with_na(any_numpy_dtype_reduced, nulls_fixture):
fill_value = nulls_fixture
dtype = np.dtype(any_numpy_dtype_reduced)
if isinstance(fill_value, Decimal):
# Subject to change, but ATM (When Decimal(NAN) is being added to nulls_fixture)
# this is the existing behavior in maybe_promote,
# hinges on is_valid_na_for_dtype
if dtype.kind in ["i", "u", "f", "c"]:
if dtype.kind in ["i", "u"]:
expected_dtype = np.dtype(np.float64)
else:
expected_dtype = dtype
exp_val_for_scalar = np.nan
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
elif is_integer_dtype(dtype) and fill_value is not NaT:
# integer + other missing value (np.nan / None) casts to float
expected_dtype = np.float64
exp_val_for_scalar = np.nan
elif is_object_dtype(dtype) and fill_value is NaT:
# inserting into object does not cast the value
# but *does* cast None to np.nan
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
elif is_datetime_or_timedelta_dtype(dtype):
# datetime / timedelta cast all missing values to dtyped-NaT
expected_dtype = dtype
exp_val_for_scalar = dtype.type("NaT", "ns")
elif fill_value is NaT:
# NaT upcasts everything that's not datetime/timedelta to object
expected_dtype = np.dtype(object)
exp_val_for_scalar = NaT
elif is_float_dtype(dtype) or is_complex_dtype(dtype):
# float / complex + missing value (!= NaT) stays the same
expected_dtype = dtype
exp_val_for_scalar = np.nan
else:
# all other cases cast to object, and use np.nan as missing value
expected_dtype = np.dtype(object)
if fill_value is pd.NA:
exp_val_for_scalar = pd.NA
else:
exp_val_for_scalar = np.nan
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
|
bsd-3-clause
|
gouthambs/OpenData
|
src/longbeach_crime_stats.py
|
1
|
10310
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 22 12:07:53 2014
@author: Gouthaman Balaraman
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup
import re
import numpy as np
import os
#####################################################
# A bunch of constants used throughout the script. #
#####################################################
_curdir= os.path.abspath(os.path.curdir)
_posdat = re.compile('(\w+):(\d+)px')
_topdat = re.compile('top:(\d+)px')
_leftdat = re.compile('left:(\d+)px')
# this is the full format with all columns; The numbers here bracket the columns
maptbl_long = [(0,75),(75,145),(145,212),(212,283),(283,350),(350,418),(418,486),
(486,554),(554,621),(621,688),(688,756),(756,823),(823,890),(890,958),
(958,1026),(1026,1094),(1094,1199)]
# This provides a mapping to the column with the text
mptbltxt = ['RD','MURDER','MANSLTR','FORCED_RAPE','ROBBERY','AGGRAV_ASSAULT',
'BURGLARY_RES','BURGLARY_COM','AUTO_BURG','GRAND_THEFT','PETTY_THEFT',
'BIKE_THEFT','AUTO_THEFT','ARSON','TOTAL_PART1','TOTAL_PART2','GRAND_TOTAL']
# this is a truncated version I found for some months; the numbers here bracket the columns
maptbl_short=[(0,133),(133,194.5),(194.5,264),(264,329),(329,396),(396,466),(466,531),
(531,597),(597,667.5),(667.5,736),(736,803),(803,871),(871,938),(938,1004),(1004,1300)
]
def load_html(filename):
soup = BeautifulSoup(file(filename).read())
return soup
def grab_pages(soup):
return soup.body.find_all('div')
def cleanup_data(data):
    # remove non-breaking spaces (\xa0)
data = data.replace(u'\xa0','')
return data
def create_buckets(arr):
'''
Here we bin the rows based on 'top' value
'''
sarr = np.sort(arr)
# coarseness ; this is used to separate different rows
crsns = 10# np.mean(sdiff)
s = 0
prev = sarr[0]
buckets = []
for sa in sarr[1:]:
if sa-prev>crsns:
e = (sa+prev)*0.5
buckets.append((s,e))
s = e
prev = sa
#else
buckets.append((s,s+40))
return [buckets,[i for i,y in enumerate(buckets)]]
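# A minimal sketch (illustrative only, not part of the original script) of how
# create_buckets groups 'top' pixel positions into row bands: positions closer
# together than the 10px coarseness share a bucket, larger jumps start a new one.
def _example_create_buckets():
    tops = np.array([100, 102, 150, 151, 153, 210])
    buckets, idx = create_buckets(tops)
    # buckets -> [(0, 126.0), (126.0, 181.5), (181.5, 221.5)], idx -> [0, 1, 2]
    return buckets, idx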
def create_frame(pnodes,mptbl,mptbltxt,lftmrkr):
'''
For a given page, here I use the position to tag it with a column number.
    Then a data frame is created and pivoted to reconstruct a proper table
    that resembles the actual data set.
'''
df = pd.DataFrame(pnodes)
[tmptbl,tmptblval] = create_buckets(df.top.unique()) # buckets for top
dval = []
for t in tmptbl:
dvlst = df[(df["top"]>=t[0])&(df["top"]<=t[1])&(df['left']<lftmrkr)]['content'].values
#dval.append(dvlst[0] if len(dvlst)>0 else u'RD')
cval = dvlst[0] if len(dvlst)>0 else u'RD'
dval.append(cval)
#df[(df["top"]>=t[0])&(df["top"]<=t[1])]['rowval'] = cval
df['row'] = df['top'].map(lambda g:
[
dval[i] for i,x in enumerate(tmptbl)
if ((x[0]<=g)and(g<=x[1])) or None
][0]
)
dfs = df[df['row']!='RD']
dlst = dcnt = []
for i,v in dfs.iterrows():
if v.left<lftmrkr:
dcnt.append(v.content)
dlst.append(v.top)
dfs['column'] = dfs['left'].map(lambda g: [mptbltxt[i] for i,x in enumerate(mptbl)
if ((x[0]<=g)and(g<=x[1]))][0])
pvt = dfs.pivot(index='row',columns='column',values='content')
pvt.fillna(0,inplace=True)
for c in pvt.columns:
try:
pvt[c] = pvt[c].astype(int)
except:
pass
return pvt
'''
# this didn't work; need to check later
def grab_monthlypdfs():
domain='http://www.longbeach.gov'
url = 'http://www.longbeach.gov/police/statistics.asp'
res = requests.get(url)
sp = BeautifulSoup(res.text)
tbody = sp.find_all('tbody')
links = tbody[3].find_all('a')
pdfdir = os.path.join(_curdir,'files','PDF')
if not os.path.exists(pdfdir):
os.makedirs(pdfdir)
for l in links:
title = '_'.join( l['title'].split(" ") )
print title
try:
res = requests.get(domain+l['href'],stream=True)
pdffile = os.path.join(pdfdir,title+'.pdf')
with open(pdffile,'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
except Exception as e:
print 'FAILED: '+str(e)+l['title']+" "+l['href']
'''
def extract_nodes(p,lftmrkr):
'''
This is the code that extracts the beautiful soup html document into
a bunch of nodes for easy processing
'''
nodes = p.find_all('p' )
dlist = []
nextdat = {}
for node in nodes:
ddict = {}
attrs = node.attrs
attrssty = attrs.get('style','')
attrscls = attrs.get('class','')
if attrscls[0] == 'ft01' or attrscls[0] == 'ft03':
posns = _posdat.findall(attrssty)
if len(posns) == 2:
k,v = zip(*posns)
if ('top' in k ) and ('left' in k):
if nextdat != {}:
nextdat['top'] = int(v[0]) if k[0] == 'top' else int(v[1])
ddict = nextdat
nextdat = {}
ddict[k[0]] = int(v[0])
ddict[k[1]] = int(v[1])
cont = node.contents
if len(cont) == 1 :
ddict['content'] = cont[0].replace('\xa0','0')
elif len(cont)==3:
ddict['content'] = cont[0].replace('\xa0','0')
nextdat['content'] = cont[2].replace('\xa0','0')
nextdat['left'] = int(v[1])if k[1] == 'left' else int(v[0])
#if (ddict['left']<lftmrkr) and (ddict['content']!= 'RD'):
# currrd = ddict['content']
#ddict['rd'] = currrd
dlist.append(ddict)
return dlist
def create_html(pdffile):
'''
Given a pdf file, this calls pdftohtml.exe to convert to html
'''
try:
pdftohtml = "pdftohtml.exe "
htmldir = os.path.join(_curdir,'files','HTML')
if not os.path.exists(htmldir):
os.makedirs(htmldir)
pdffile = os.path.abspath(pdffile)
fileprefix = os.path.split(pdffile)[1].split('.pdf')[0]
cmd = pdftohtml+pdffile+" -c -noframes "+os.path.join(htmldir,fileprefix+".html")
print cmd
os.system(cmd)
except Exception as e:
print str(e)
def convert_all_pdfs(pdfdir):
'''
    Convenience method to loop over all the pdf files, calling create_html
    on each one.
'''
for f in os.listdir(pdfdir):
if f.endswith('.pdf'):
create_html(os.path.join(pdfdir,f))
def _finalize_dataframe(ddf):
'''
Does some clean-up, check sums to validate the data. This is a basic
check. Nothing is guaranteed!
'''
# do a checksum test
if 'TOTAL_PART1' in ddf.columns:
checksum = np.sum(\
np.power(
ddf[mptbltxt[1:14]].astype(int).sum(axis=1) -
ddf['TOTAL_PART1'].astype(int)
,2)
)
if checksum:
print "Failed check sum test "+str(checksum)
else:
print "Passed checksum test"
# reorder the columns
if len(ddf.columns) == 17:
ddf = ddf[mptbltxt]
else:
ddf = ddf[mptbltxt[:15]]
del ddf['RD']
ddf.index.name = 'RD'
return ddf
def create_csv(htmlfile):
'''
This creates the csv file given a html file
'''
try:
print "Converting "+htmlfile
soup = load_html(htmlfile)
pages = grab_pages(soup)
num_nodes = len(pages[0])
leftmrkr = 75 if num_nodes > 440 else 133 # to handle two pdf formats
mptbl = maptbl_long if num_nodes > 440 else maptbl_short
#filetype = 1 if num_nodes > 480 else 0 # 1 if long type else 0
pvts = []
for i,p in enumerate(pages):
print 'Page-'+str(i)
dlist = extract_nodes(p,leftmrkr)
#df = create_frame(dlist,mptbl0,mptbltxt,leftmrkr)
df = create_frame(dlist,mptbl,mptbltxt,leftmrkr)
pvts.append(df)
ddf = pd.concat(pvts)
exclrows = set(['0'+str(i)for i in range(2000,2020,1)]) | set(['%CHG'])
exclrows = exclrows & set(ddf.index)
ddf.drop(exclrows,inplace=True)
ddf.fillna(0,inplace=True)
#cleanup
ddf = _finalize_dataframe(ddf)
csvdir = os.path.join(_curdir,'files','CSV')
if not os.path.exists(csvdir):
os.makedirs(csvdir)
htmlfile = os.path.abspath(htmlfile)
fileprefix = os.path.split(htmlfile)[1].split('.html')[0]
csvfile = os.path.join(csvdir,fileprefix+".csv")
ddf.to_csv(csvfile)
except Exception as e:
print str(e)
def convert_all_htmls(htmldir):
'''
    This is a top level driver that calls create_csv in a loop
'''
for f in os.listdir(htmldir):
if f.endswith('.html'):
create_csv(os.path.join(htmldir,f))
#break
if __name__=='__main__':
'''
Here is a complete example to loop over all pdfs and create all csvs.
    >>> pdfdir = "D:\\Development\\Python\\CrimeData\\files\\PDF"
    >>> convert_all_pdfs(pdfdir)
    >>> htmldir = "D:\\Development\\Python\\CrimeData\\files\\HTML"
    >>> convert_all_htmls(htmldir)
    Or you can do individual file conversions:
    >>> pdffile = os.path.join(pdfdir,'January_2013.pdf')
    >>> create_html(pdffile)
'''
# Convert pdfs to html
pdfdir = "D:\\Development\\Python\\CrimeData\\files\\PDF"
pdffile = os.path.join(pdfdir,'January_2013.pdf')
create_html(pdffile)
#convert_all_pdfs(pdfdir)
# Then convert html to csv
htmldir = "D:\\Development\\Python\\CrimeData\\files\\HTML"
html = os.path.join(htmldir,'January_2013.html')
create_csv(html)
#convert_all_htmls(htmldir)
|
mit
|
IDEALLab/design_embeddings_jmd_2016
|
hp_sae.py
|
1
|
7435
|
"""
Hyperparameter optimization for stacked autoencoders using pysmac
Usage: python hp_sae.py
Author(s): Wei Chen ([email protected])
"""
import ConfigParser
import pickle
import math
import timeit
import numpy as np
from sklearn.cross_validation import KFold
from stacked_ae import sae
from intrinsic_dim import mide
from parametric_space import initialize
from util import create_dir, reduce_dim
import pysmac
def cross_validate(hidden_size_l1, hidden_size_l2, hidden_size_l3, hidden_size_l4, p,
l, batch_size, X, n_folds, n_components):
# K-fold cross-validation
kf = KFold(X.shape[0], n_folds=n_folds, shuffle=True)
i = 1
loss = 0
for train, test in kf:
train = train.tolist()
test = test.tolist()
print 'cross validation: %d' % i
i += 1
if len(train)>10 and len(test): # if there are enough training and test samples
# Get cost
loss += sae(X, n_components, train, test, hidden_size_l1, hidden_size_l2, hidden_size_l3, hidden_size_l4, p,
l, batch_size, evaluation=True)
else:
print 'Please add more samples!'
# Get test reconstruction error
rec_err_cv = loss/n_folds
return rec_err_cv
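# A minimal sketch of the KFold splitting used above, on a toy array and
# independent of the autoencoder itself (3 folds, shuffled):
#   X_toy = np.arange(12).reshape(6, 2)
#   for train, test in KFold(X_toy.shape[0], n_folds=3, shuffle=True):
#       print train.tolist(), test.tolist()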
def wrapper(hidden_size_l1, hidden_size_l2, hidden_size_l3, hidden_size_l4, p, lg_l, batch_size):
l = 10**lg_l
with open(tempname, 'rb') as f:
temp = pickle.load(f)
temp[2] += 1 # iteration
print '----------------------------------------------------'
print '%d/%d' % (c+1, len(X_list))
print 'Iteration: %d/%d' %(temp[2], n_iter)
print '1st hidden layer size = ', hidden_size_l1
print '2nd hidden layer size = ', hidden_size_l2
print '3rd hidden layer size = ', hidden_size_l3
print '4th hidden layer size = ', hidden_size_l4
print 'dropout fraction = ', p
print 'weight decay = ', l
print 'batch size = ', batch_size
rec_err_cv = cross_validate(hidden_size_l1, hidden_size_l2, hidden_size_l3, hidden_size_l4, p,
l, batch_size, X_l[trainc], n_folds, n_features)
print 'Result of algorithm run: SUCCESS, %f' % rec_err_cv
if rec_err_cv < temp[0]:
temp[0:2] = [rec_err_cv, 0]
temp[3] = [hidden_size_l1, hidden_size_l2, hidden_size_l3, hidden_size_l4, p, l, batch_size]
else:
temp[1] += 1
with open(tempname, 'wb') as f:
pickle.dump(temp, f)
print '********* Optimal configuration **********'
print '1st hidden layer size = ', temp[3][0]
print '2nd hidden layer size = ', temp[3][1]
print '3rd hidden layer size = ', temp[3][2]
print '4th hidden layer size = ', temp[3][3]
print 'dropout fraction = ', temp[3][4]
print 'weight decay = ', temp[3][5]
print 'batch size = ', temp[3][6]
print 'optimal: ', temp[0]
print 'count: ', temp[1]
return rec_err_cv
def opt():
global temp, tempname, n_folds, c, X_list, n_iter, n_features, X_l, trainc
config = ConfigParser.ConfigParser()
config.read('./config.ini')
n_folds = config.getint('Global', 'n_folds')
max_dim = config.getint('Global', 'n_features')
X_list, source, sname, n_samples, n_points, noise_scale, source_dir = initialize()
test_size = config.getfloat('Global', 'test_size')
# Open the config file
cfgname = './hp_opt/hp_%s_%.4f.ini' % (sname, noise_scale)
hp = ConfigParser.ConfigParser()
hp.read(cfgname)
start_time = timeit.default_timer()
c = 0
for X in X_list:
if X.shape[0] < 10:
c += 1
continue
print '============ Cluster %d ============' % c
# Initialize file to store the reconstruction error and the count
        temp = [np.inf, 0, 0, [0]*7] #[err, count, iteration, optimal parameters]
create_dir('./hp_opt/temp/')
tempname = './hp_opt/temp/sae'
with open(tempname, 'wb') as f:
pickle.dump(temp, f)
# Reduce dimensionality
X_l, dim_increase = reduce_dim(X, plot=False)
n_allc = X.shape[0]
if n_allc < 10:
continue
# Specify training and test set
n_trainc = int(math.floor(n_allc * (1-test_size)))
print 'Training sample size: ', n_trainc
trainc = range(n_trainc)
# Estimate intrinsic dimension and nonlinearity
print 'Estimating intrinsic dimension ...'
intr_dim = mide(X_l[trainc], verbose=0)[0]
print 'Intrinsic dimension: ', intr_dim
if intr_dim < max_dim:
n_features = intr_dim
else:
n_features = max_dim
# Define parameters
hs_min = n_features+1
hs_max = int(X_l.shape[1] * 2)
parameters=dict(
hidden_size_l1=('categorical', range(hs_min, hs_max+1)+[0], 0),\
hidden_size_l2=('categorical', range(hs_min, hs_max+1)+[0], 0),\
hidden_size_l3=('categorical', range(hs_min, hs_max+1)+[0], 0),\
hidden_size_l4=('categorical', range(hs_min, hs_max+1)+[0], 0),\
p = ('real', [0, 0.5], 0.3),\
lg_l = ('real', [-10, 0.0], -10),\
batch_size = ('integer', [1, n_trainc], 100),\
)
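        # Each entry above is (type, range_or_choices, default); a hidden layer
        # size of 0 presumably tells sae() to skip that layer. The forbidden
        # clauses defined next reject configurations where a later hidden layer
        # is wider than the previous one, so layer sizes only stay equal or shrink.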
# Define constraints
forbidden_confs = ["{hidden_size_l1 < hidden_size_l2}",\
"{hidden_size_l2 < hidden_size_l3}",\
"{hidden_size_l3 < hidden_size_l4}"]
# Create a SMAC_optimizer object
opt = pysmac.SMAC_optimizer()
n_iter = 500
start_time = timeit.default_timer()
value, parameters = opt.minimize(wrapper, # the function to be minimized
n_iter, # the number of function calls allowed
parameters, # the parameter dictionary
forbidden_clauses = forbidden_confs) # constraints
# Write optimal parameters to the config file
section = 'stacked_AE'+str(c)
if not hp.has_section(section):
# Create the section if it does not exist.
hp.add_section(section)
hp.write(open(cfgname,'w'))
hp.read(cfgname)
hp.set(section,'hidden_size_l1',parameters['hidden_size_l1'])
hp.set(section,'hidden_size_l2',parameters['hidden_size_l2'])
hp.set(section,'hidden_size_l3',parameters['hidden_size_l3'])
hp.set(section,'hidden_size_l4',parameters['hidden_size_l4'])
hp.set(section,'dropout_fraction', parameters['p'])
hp.set(section,'weight_decay',10**float(parameters['lg_l']))
hp.set(section,'batch_size', parameters['batch_size'])
hp.write(open(cfgname,'w'))
print(('Lowest function value found: %f'%value))
print(('Parameter setting %s'%parameters))
c += 1
end_time = timeit.default_timer()
training_time = (end_time - start_time)
print 'Training time: %.2f h' % (training_time/3600.)
|
mit
|
meduz/scikit-learn
|
benchmarks/bench_lasso.py
|
111
|
3364
|
"""
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
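# The measurement pattern above, pulled out on its own as a sketch: force a
# garbage collection so GC pauses do not pollute the timing, then time a single
# fit. The estimator and data arguments are placeholders.
def _time_single_fit(estimator, X, Y):
    gc.collect()
    tstart = time()
    estimator.fit(X, Y)
    return time() - tstart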
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import matplotlib.pyplot as plt
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
plt.figure('scikit-learn LASSO benchmark results')
plt.subplot(211)
plt.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
plt.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
plt.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features,
alpha))
plt.legend(loc='upper left')
plt.xlabel('number of samples')
plt.ylabel('Time (s)')
plt.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
plt.subplot(212)
plt.plot(list_n_features, lasso_results, 'b-', label='Lasso')
plt.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
plt.title('%d samples, alpha=%s' % (n_samples, alpha))
plt.legend(loc='upper left')
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
mcdeaton13/dynamic
|
Data/Calibration/ConsumptionParameters/create_Pi_matrix.py
|
2
|
5842
|
'''
------------------------------------------------------------------------
Last updated 03/12/2015
Read in BEA's 2007 PCE Bridge data, use these data to
create matrix relating production industry output to
consumption good categories. This matrix is called
'Pi'.
This py-file calls the following other file(s):
PCEBridge_2007_Detail.xlsx
nipa_cons_category_crosswalk.xlsx
prod_sector_crosswalk.xlsx
This py-file creates the following other file(s):
(make sure that an OUTPUT folder exists)
OUTPUT/Pi_mat.pkl
------------------------------------------------------------------------
'''
'''
------------------------------------------------------------------------
Packages
------------------------------------------------------------------------
'''
import os.path
import numpy as np
import pandas as pd
import xlrd
'''
------------------------------------------------------------------------
------------------------------------------------------------------------
------------------------------------------------------------------------
'''
# Working directory and input file name
path = '/Users/jasondebacker/repos/dynamic/Data/Calibration/ConsumptionParameters/'
pce_bridge_name = 'PCEBridge_2007_Detail.xlsx'
cons_xwalk_name = 'nipa_cons_category_crosswalk.xlsx'
prod_xwalk_name = 'prod_industry_crosswalk.xlsx'
sheet_name = '2007'
# read in the BEA's PCE Bridge File
pce_bridge_df = pd.read_excel(path + pce_bridge_name,
sheetname=sheet_name,header=5)
# read in the NIPA consumption crosswalk
cons_cat_df = pd.read_excel(path + cons_xwalk_name,header=0)
# read in the BEA/NAICS production industry crosswalk
prod_ind_df = pd.read_excel(path + prod_xwalk_name,header=0)
# rename columns (since problem reading headers with apostrophes)
pce_bridge_df.rename(columns={'Unnamed: 4': 'Producer Value',
'Unnamed: 5': 'Transportation Value',
'Unnamed: 8': 'Purchasers Value'}, inplace=True)
# rename to shorter variable names
pce_bridge_df.rename(columns={'NIPA Line': 'nipa_line', 'PCE Category':
'pce_category','Commodity Code': 'comm_code',
'Commodity Description': 'comm_descrip',
'Producer Value': 'prod_value',
'Transportation Value': 'trans_value',
'Wholesale': 'whole_value',
'Retail': 'retail_value',
'Purchasers Value': 'purch_value',
'Year': 'year'}, inplace=True)
# merge w/ x-walk to get cons categories for model
pce_bridge_df = pd.merge(pce_bridge_df,cons_cat_df,how='left',on='nipa_line')
# merge w/ x-walk to get production industries for model
pce_bridge_df = pd.merge(pce_bridge_df,prod_ind_df,how='left',on='comm_code')
# collapse data by model consumption category and production industry
pce_bridge_sums_df = pce_bridge_df.groupby(['cons_cat','production_industry']
,as_index=False)['prod_value',
'trans_value','whole_value',
'retail_value','purch_value'].sum()
prod_sum_df = pce_bridge_sums_df[['cons_cat','production_industry',
'prod_value']]
prod_sum_df.rename(columns={'prod_value': 'value'}, inplace=True)
# Put zeros for categories with no consumption goods
# This fills in values to make the matrix IxM
prod_sum_df.loc[len(prod_sum_df)+1]=[0,2,0]
prod_sum_df.loc[len(prod_sum_df)+1]=[0,5,0]
prod_sum_df.loc[len(prod_sum_df)+1]=[0,17,0]
prod_sum_df.to_csv(path+'test_prod_sum.csv', index=False)
# Create totals for transport, whole sale, retail
trans_sum_df = pce_bridge_df.groupby(['cons_cat'],
as_index=False)['trans_value'].sum()
trans_sum_df.rename(columns={'trans_value': 'value'}, inplace=True)
trans_sum_df['production_industry'] = 12
retail_sum_df = pce_bridge_df.groupby(['cons_cat'],
as_index=False)['retail_value'].sum()
retail_sum_df.rename(columns={'retail_value': 'value'}, inplace=True)
retail_sum_df['production_industry'] = 11
whole_sum_df = pce_bridge_df.groupby(['cons_cat'],
as_index=False)['whole_value'].sum()
whole_sum_df.rename(columns={'whole_value': 'value'}, inplace=True)
whole_sum_df['production_industry'] = 10
# append data so have each prod industry together
pi_long_df=whole_sum_df.append([retail_sum_df,trans_sum_df,prod_sum_df])
# collapse again, because some transport, etc. remains in prod industries
pi_long_df = pi_long_df.groupby(['cons_cat','production_industry']
,as_index=False)['value'].sum()
# pivot table to create matrix
pi_mat_df=pd.pivot_table(pi_long_df,index='production_industry',columns='cons_cat', values='value',fill_value=0)
# put in percentages instead of dollars (sum percent over cons_cat =1)
# I'm sure there is more elegant way than using a merge...
con_tot_df = pi_long_df.groupby(["cons_cat"],as_index=False)['value'].sum()
con_tot_df.rename(columns={'value': 'cons_total'}, inplace=True)
pi_long_pct_df = pd.merge(pi_long_df,con_tot_df,how='left',on='cons_cat')
pi_long_pct_df['fraction'] = pi_long_pct_df['value']/pi_long_pct_df['cons_total']
# pivot table to create matrix
pi_pct_mat_df=pd.pivot_table(pi_long_pct_df,index='production_industry',
columns='cons_cat', values='fraction',fill_value=0)
# save files to csv
pi_long_pct_df.to_csv(path+'test_Pi_long.csv', index=False)
pi_pct_mat_df.to_csv(path+'test_Pi_mat_pct.csv', index=False)
pi_mat_df.to_csv(path+'test_Pi_mat.csv', index=False)
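# A toy sketch of the pivot-and-normalize step above: long (industry, category,
# value) records become an industry-by-category matrix whose columns sum to one.
# The industries, categories and dollar values below are made up for illustration.
def _example_pi_fractions():
    long_df = pd.DataFrame({'production_industry': [1, 2, 1, 2],
                            'cons_cat': ['food', 'food', 'rent', 'rent'],
                            'value': [30.0, 10.0, 5.0, 15.0]})
    totals = long_df.groupby('cons_cat', as_index=False)['value'].sum()
    totals.rename(columns={'value': 'cons_total'}, inplace=True)
    merged = pd.merge(long_df, totals, how='left', on='cons_cat')
    merged['fraction'] = merged['value'] / merged['cons_total']
    return pd.pivot_table(merged, index='production_industry',
                          columns='cons_cat', values='fraction', fill_value=0)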
|
mit
|
ankurankan/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
241
|
1761
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-tree variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
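# std holds, for each feature, the standard deviation of its importance across
# the 250 trees; it is what drives the error bars in the plot below.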
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
|
bsd-3-clause
|
buntyke/GPy
|
GPy/testing/likelihood_tests.py
|
8
|
35323
|
# Copyright (c) 2014, Alan Saul
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import unittest
import GPy
from GPy.models import GradientChecker
import functools
import inspect
from GPy.likelihoods import link_functions
from functools import partial
fixed_seed = 7
#np.seterr(divide='raise')
def dparam_partial(inst_func, *args):
"""
    If we have an instance method that needs to be called but that doesn't
    take the parameter we wish to checkgrad, then this function
will change the variable using set params.
    inst_func: should be an instance method of an object that we would like
to change
param: the param that will be given to set_params
    args: anything else that needs to be given to the function (for example
        the f or Y that are being used in the function whilst we tweak the
        param)
"""
def param_func(param_val, param_name, inst_func, args):
#inst_func.__self__._set_params(param)
#inst_func.__self__.add_parameter(Param(param_name, param_val))
inst_func.__self__[param_name] = param_val
return inst_func(*args)
return functools.partial(param_func, inst_func=inst_func, args=args)
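# A minimal usage sketch for dparam_partial (the likelihood, parameter name and
# arguments are illustrative): wrap a bound method so checkgrad can move one
# named parameter while the other arguments stay fixed.
#   logpdf_of_var = dparam_partial(likelihood.logpdf, f, Y)
#   logpdf_of_var(0.3, '.*variance')  # evaluates logpdf(f, Y) with variance set to 0.3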
def dparam_checkgrad(func, dfunc, params, params_names, args, constraints=None, randomize=False, verbose=False):
"""
checkgrad expects a f: R^N -> R^1 and df: R^N -> R^N
However if we are holding other parameters fixed and moving something else
We need to check the gradient of each of the fixed parameters
    (f and y for example) separately, whilst moving another parameter.
Otherwise f: gives back R^N and
df: gives back R^NxM where M is
The number of parameters and N is the number of data
Need to take a slice out from f and a slice out of df
"""
print("\n{} likelihood: {} vs {}".format(func.__self__.__class__.__name__,
func.__name__, dfunc.__name__))
partial_f = dparam_partial(func, *args)
partial_df = dparam_partial(dfunc, *args)
gradchecking = True
zipped_params = zip(params, params_names)
for param_ind, (param_val, param_name) in enumerate(zipped_params):
#Check one parameter at a time, make sure it is 2d (as some gradients only return arrays) then strip out the parameter
f_ = partial_f(param_val, param_name)
df_ = partial_df(param_val, param_name)
        #Reshape it so that we always have a 3d matrix, i.e. (?, N, D), regardless of whether ? is num_params or not
f_ = f_.reshape(-1, f_.shape[0], f_.shape[1])
df_ = df_.reshape(-1, f_.shape[0], f_.shape[1])
#Get the number of f and number of dimensions
fnum = f_.shape[-2]
fdim = f_.shape[-1]
dfnum = df_.shape[-2]
for fixed_val in range(dfnum):
#dlik and dlik_dvar gives back 1 value for each
f_ind = min(fnum, fixed_val+1) - 1
print("fnum: {} dfnum: {} f_ind: {} fixed_val: {}".format(fnum, dfnum, f_ind, fixed_val))
#Make grad checker with this param moving, note that set_params is NOT being called
#The parameter is being set directly with __setattr__
#Check only the parameter and function value we wish to check at a time
#func = lambda p_val, fnum, fdim, param_ind, f_ind, param_ind: partial_f(p_val, param_name).reshape(-1, fnum, fdim)[param_ind, f_ind, :]
#dfunc_dparam = lambda d_val, fnum, fdim, param_ind, fixed_val: partial_df(d_val, param_name).reshape(-1, fnum, fdim)[param_ind, fixed_val, :]
            #First we reshape the output such that it is (num_params, N, D), then we pull out the relevant parameter f-index and checkgrad just this index at a time
func = lambda p_val: partial_f(p_val, param_name).reshape(-1, fnum, fdim)[param_ind, f_ind, :]
dfunc_dparam = lambda d_val: partial_df(d_val, param_name).reshape(-1, fnum, fdim)[param_ind, fixed_val, :]
grad = GradientChecker(func, dfunc_dparam, param_val, [param_name])
if constraints is not None:
for constrain_param, constraint in constraints:
if grad.grep_param_names(constrain_param):
constraint(constrain_param, grad)
else:
print("parameter didn't exist")
print(constrain_param, " ", constraint)
if randomize:
grad.randomize()
if verbose:
print(grad)
grad.checkgrad(verbose=1)
if not grad.checkgrad(verbose=True):
gradchecking = False
return gradchecking
from nose.tools import with_setup
class TestNoiseModels(object):
"""
Generic model checker
"""
def setUp(self):
np.random.seed(fixed_seed)
self.N = 15
self.D = 3
self.X = np.random.rand(self.N, self.D)*10
self.real_std = 0.1
noise = np.random.randn(*self.X[:, 0].shape)*self.real_std
self.Y = (np.sin(self.X[:, 0]*2*np.pi) + noise)[:, None]
self.f = np.random.rand(self.N, 1)
self.binary_Y = np.asarray(np.random.rand(self.N) > 0.5, dtype=np.int)[:, None]
self.positive_Y = np.exp(self.Y.copy())
tmp = np.round(self.X[:, 0]*3-3)[:, None] + np.random.randint(0,3, self.X.shape[0])[:, None]
self.integer_Y = np.where(tmp > 0, tmp, 0)
self.var = 0.2
self.deg_free = 4.0
#Make a bigger step as lower bound can be quite curved
self.step = 1e-4
"""
Dictionary where we nest models we would like to check
Name: {
"model": model_instance,
"grad_params": {
"names": [names_of_params_we_want, to_grad_check],
"vals": [values_of_params, to_start_at],
"constrain": [constraint_wrappers, listed_here]
            },
            "laplace": boolean_of_whether_model_should_work_for_laplace,
            "ep": boolean_of_whether_model_should_work_for_ep,
"link_f_constraints": [constraint_wrappers, listed_here]
}
"""
self.noise_models = {"Student_t_default": {
"model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [self.var],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
#"Student_t_deg_free": {
#"model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
#"grad_params": {
#"names": [".*deg_free"],
#"vals": [self.deg_free],
#"constraints": [(".*t_scale2", self.constrain_fixed), (".*deg_free", self.constrain_positive)]
#},
#"laplace": True
#},
"Student_t_1_var": {
"model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [1.0],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
"Student_t_small_deg_free": {
"model": GPy.likelihoods.StudentT(deg_free=1.5, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [self.var],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
"Student_t_small_var": {
"model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [0.001],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
"Student_t_large_var": {
"model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [10.0],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
"Student_t_approx_gauss": {
"model": GPy.likelihoods.StudentT(deg_free=1000, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [self.var],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
#"Student_t_log": {
#"model": GPy.likelihoods.StudentT(gp_link=link_functions.Log(), deg_free=5, sigma2=self.var),
#"grad_params": {
#"names": [".*t_noise"],
#"vals": [self.var],
#"constraints": [(".*t_noise", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
#},
#"laplace": True
#},
"Gaussian_default": {
"model": GPy.likelihoods.Gaussian(variance=self.var),
"grad_params": {
"names": [".*variance"],
"vals": [self.var],
"constraints": [(".*variance", self.constrain_positive)]
},
"laplace": True,
"ep": False, # FIXME: Should be True when we have it working again
"variational_expectations": True,
},
"Gaussian_log": {
"model": GPy.likelihoods.Gaussian(gp_link=link_functions.Log(), variance=self.var),
"grad_params": {
"names": [".*variance"],
"vals": [self.var],
"constraints": [(".*variance", self.constrain_positive)]
},
"laplace": True,
"variational_expectations": True
},
#"Gaussian_probit": {
#"model": GPy.likelihoods.gaussian(gp_link=link_functions.Probit(), variance=self.var, D=self.D, N=self.N),
#"grad_params": {
#"names": ["noise_model_variance"],
#"vals": [self.var],
#"constraints": [constrain_positive]
#},
#"laplace": True
#},
#"Gaussian_log_ex": {
#"model": GPy.likelihoods.gaussian(gp_link=link_functions.Log_ex_1(), variance=self.var, D=self.D, N=self.N),
#"grad_params": {
#"names": ["noise_model_variance"],
#"vals": [self.var],
#"constraints": [constrain_positive]
#},
#"laplace": True
#},
"Bernoulli_default": {
"model": GPy.likelihoods.Bernoulli(),
"link_f_constraints": [partial(self.constrain_bounded, lower=0, upper=1)],
"laplace": True,
"Y": self.binary_Y,
"ep": False, # FIXME: Should be True when we have it working again
"variational_expectations": True
},
"Exponential_default": {
"model": GPy.likelihoods.Exponential(),
"link_f_constraints": [self.constrain_positive],
"Y": self.positive_Y,
"laplace": True,
},
"Poisson_default": {
"model": GPy.likelihoods.Poisson(),
"link_f_constraints": [self.constrain_positive],
"Y": self.integer_Y,
"laplace": True,
"ep": False #Should work though...
},
#,
#GAMMA needs some work!"Gamma_default": {
#"model": GPy.likelihoods.Gamma(),
#"link_f_constraints": [constrain_positive],
#"Y": self.positive_Y,
#"laplace": True
#}
}
####################################################
# Constraint wrappers so we can just list them off #
####################################################
def constrain_fixed(self, regex, model):
model[regex].constrain_fixed()
def constrain_negative(self, regex, model):
model[regex].constrain_negative()
def constrain_positive(self, regex, model):
model[regex].constrain_positive()
def constrain_fixed_below(self, regex, model, up_to):
model[regex][0:up_to].constrain_fixed()
def constrain_fixed_above(self, regex, model, above):
model[regex][above:].constrain_fixed()
def constrain_bounded(self, regex, model, lower, upper):
"""
Used like: partial(constrain_bounded, lower=0, upper=1)
"""
model[regex].constrain_bounded(lower, upper)
def tearDown(self):
self.Y = None
self.f = None
self.X = None
def test_scale2_models(self):
self.setUp()
for name, attributes in self.noise_models.items():
model = attributes["model"]
if "grad_params" in attributes:
params = attributes["grad_params"]
param_vals = params["vals"]
param_names= params["names"]
param_constraints = params["constraints"]
else:
params = []
param_vals = []
param_names = []
constrain_positive = []
param_constraints = []
if "link_f_constraints" in attributes:
link_f_constraints = attributes["link_f_constraints"]
else:
link_f_constraints = []
if "Y" in attributes:
Y = attributes["Y"].copy()
else:
Y = self.Y.copy()
if "f" in attributes:
f = attributes["f"].copy()
else:
f = self.f.copy()
if "Y_metadata" in attributes:
Y_metadata = attributes["Y_metadata"].copy()
else:
Y_metadata = None
if "laplace" in attributes:
laplace = attributes["laplace"]
else:
laplace = False
if "ep" in attributes:
ep = attributes["ep"]
else:
ep = False
if "variational_expectations" in attributes:
var_exp = attributes["variational_expectations"]
else:
var_exp = False
#if len(param_vals) > 1:
#raise NotImplementedError("Cannot support multiple params in likelihood yet!")
#Required by all
#Normal derivatives
yield self.t_logpdf, model, Y, f, Y_metadata
yield self.t_dlogpdf_df, model, Y, f, Y_metadata
yield self.t_d2logpdf_df2, model, Y, f, Y_metadata
#Link derivatives
yield self.t_dlogpdf_dlink, model, Y, f, Y_metadata, link_f_constraints
yield self.t_d2logpdf_dlink2, model, Y, f, Y_metadata, link_f_constraints
if laplace:
#Laplace only derivatives
yield self.t_d3logpdf_df3, model, Y, f, Y_metadata
yield self.t_d3logpdf_dlink3, model, Y, f, Y_metadata, link_f_constraints
#Params
yield self.t_dlogpdf_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_dlogpdf_df_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_d2logpdf2_df2_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
#Link params
yield self.t_dlogpdf_link_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_dlogpdf_dlink_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_d2logpdf2_dlink2_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
#laplace likelihood gradcheck
yield self.t_laplace_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints
if ep:
#ep likelihood gradcheck
yield self.t_ep_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints
if var_exp:
#Need to specify mu and var!
yield self.t_varexp, model, Y, Y_metadata
yield self.t_dexp_dmu, model, Y, Y_metadata
yield self.t_dexp_dvar, model, Y, Y_metadata
self.tearDown()
#############
# dpdf_df's #
#############
@with_setup(setUp, tearDown)
def t_logpdf(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
#print model._get_params()
np.testing.assert_almost_equal(
model.pdf(f.copy(), Y.copy(), Y_metadata=Y_metadata).prod(),
np.exp(model.logpdf(f.copy(), Y.copy(), Y_metadata=Y_metadata).sum())
)
@with_setup(setUp, tearDown)
def t_dlogpdf_df(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
self.description = "\n{}".format(inspect.stack()[0][3])
logpdf = functools.partial(np.sum(model.logpdf), y=Y, Y_metadata=Y_metadata)
dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(logpdf, dlogpdf_df, f.copy(), 'g')
grad.randomize()
print(model)
assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d2logpdf_df2(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y, Y_metadata=Y_metadata)
d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(dlogpdf_df, d2logpdf_df2, f.copy(), 'g')
grad.randomize()
print(model)
assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d3logpdf_df3(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y, Y_metadata=Y_metadata)
d3logpdf_df3 = functools.partial(model.d3logpdf_df3, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(d2logpdf_df2, d3logpdf_df3, f.copy(), 'g')
grad.randomize()
print(model)
assert grad.checkgrad(verbose=1)
##############
# df_dparams #
##############
@with_setup(setUp, tearDown)
def t_dlogpdf_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.logpdf, model.dlogpdf_dtheta,
params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
@with_setup(setUp, tearDown)
def t_dlogpdf_df_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.dlogpdf_df, model.dlogpdf_df_dtheta,
params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
@with_setup(setUp, tearDown)
def t_d2logpdf2_df2_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.d2logpdf_df2, model.d2logpdf_df2_dtheta,
params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
################
# dpdf_dlink's #
################
@with_setup(setUp, tearDown)
def t_dlogpdf_dlink(self, model, Y, f, Y_metadata, link_f_constraints):
print("\n{}".format(inspect.stack()[0][3]))
logpdf = functools.partial(model.logpdf_link, y=Y, Y_metadata=Y_metadata)
dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(logpdf, dlogpdf_dlink, f.copy(), 'g')
#Apply constraints to link_f values
for constraint in link_f_constraints:
constraint('g', grad)
grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d2logpdf_dlink2(self, model, Y, f, Y_metadata, link_f_constraints):
print("\n{}".format(inspect.stack()[0][3]))
dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y, Y_metadata=Y_metadata)
d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(dlogpdf_dlink, d2logpdf_dlink2, f.copy(), 'g')
#Apply constraints to link_f values
for constraint in link_f_constraints:
constraint('g', grad)
grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d3logpdf_dlink3(self, model, Y, f, Y_metadata, link_f_constraints):
print("\n{}".format(inspect.stack()[0][3]))
d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y, Y_metadata=Y_metadata)
d3logpdf_dlink3 = functools.partial(model.d3logpdf_dlink3, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(d2logpdf_dlink2, d3logpdf_dlink3, f.copy(), 'g')
#Apply constraints to link_f values
for constraint in link_f_constraints:
constraint('g', grad)
grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
#################
# dlink_dparams #
#################
@with_setup(setUp, tearDown)
def t_dlogpdf_link_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.logpdf_link, model.dlogpdf_link_dtheta,
params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
@with_setup(setUp, tearDown)
def t_dlogpdf_dlink_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.dlogpdf_dlink, model.dlogpdf_dlink_dtheta,
params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
@with_setup(setUp, tearDown)
def t_d2logpdf2_dlink2_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.d2logpdf_dlink2, model.d2logpdf_dlink2_dtheta,
params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
################
# laplace test #
################
@with_setup(setUp, tearDown)
def t_laplace_fit_rbf_white(self, model, X, Y, f, Y_metadata, step, param_vals, param_names, constraints):
print("\n{}".format(inspect.stack()[0][3]))
#Normalize
Y = Y/Y.max()
white_var = 1e-5
kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
laplace_likelihood = GPy.inference.latent_function_inference.Laplace()
m = GPy.core.GP(X.copy(), Y.copy(), kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=laplace_likelihood)
m['.*white'].constrain_fixed(white_var)
#Set constraints
for constrain_param, constraint in constraints:
constraint(constrain_param, m)
print(m)
m.randomize()
m.randomize()
#Set params
for param_num in range(len(param_names)):
name = param_names[param_num]
m[name] = param_vals[param_num]
#m.optimize(max_iters=8)
print(m)
#if not m.checkgrad(step=step):
#m.checkgrad(verbose=1, step=step)
#NOTE this test appears to be stochastic for some likelihoods (student t?)
# appears to all be working in test mode right now...
#if isinstance(model, GPy.likelihoods.StudentT):
assert m.checkgrad(verbose=1, step=step)
###########
# EP test #
###########
@with_setup(setUp, tearDown)
def t_ep_fit_rbf_white(self, model, X, Y, f, Y_metadata, step, param_vals, param_names, constraints):
print("\n{}".format(inspect.stack()[0][3]))
#Normalize
Y = Y/Y.max()
white_var = 1e-6
kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
ep_inf = GPy.inference.latent_function_inference.EP()
m = GPy.core.GP(X.copy(), Y.copy(), kernel=kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=ep_inf)
m['.*white'].constrain_fixed(white_var)
for param_num in range(len(param_names)):
name = param_names[param_num]
m[name] = param_vals[param_num]
constraints[param_num](name, m)
m.randomize()
print(m)
assert m.checkgrad(verbose=1, step=step)
################
# variational expectations #
################
@with_setup(setUp, tearDown)
def t_varexp(self, model, Y, Y_metadata):
#Test that the analytic implementation (if it exists) matches the generic gauss
#hermite implementation
print("\n{}".format(inspect.stack()[0][3]))
#Make mu and var (marginal means and variances of q(f)) draws from a GP
k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
L = GPy.util.linalg.jitchol(k)
mu = L.dot(np.random.randn(*Y.shape))
#Variance must be positive
var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
expectation = model.variational_expectations(Y=Y, m=mu, v=var, gh_points=None, Y_metadata=Y_metadata)[0]
#Implementation of gauss hermite integration
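        # Change of variables f = sqrt(2*v)*x + m turns the Gaussian expectation
        # into a Gauss-Hermite sum:
        #   E_{N(f; m, v)}[log p(y|f)] ~= (1/sqrt(pi)) * sum_i w_i * log p(y | sqrt(2*v)*x_i + m)
        # which is what the grid X and the dot product with gh_w below compute.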
shape = mu.shape
gh_x, gh_w= np.polynomial.hermite.hermgauss(50)
m,v,Y = mu.flatten(), var.flatten(), Y.flatten()
#make a grid of points
X = gh_x[None,:]*np.sqrt(2.*v[:,None]) + m[:,None]
        #evaluate the likelihood for the grid. The first axis indexes the data (and mu, var) and the second indexes the grid.
# broadcast needs to be handled carefully.
logp = model.logpdf(X, Y[:,None], Y_metadata=Y_metadata)
        #average over the grid to get derivatives of the Gaussian's parameters
        #division by sqrt(pi) comes from the fact that each quadrature term needs to be scaled by 1/sqrt(pi)
expectation_gh = np.dot(logp, gh_w)/np.sqrt(np.pi)
expectation_gh = expectation_gh.reshape(*shape)
np.testing.assert_almost_equal(expectation, expectation_gh, decimal=5)
@with_setup(setUp, tearDown)
def t_dexp_dmu(self, model, Y, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
#Make mu and var (marginal means and variances of q(f)) draws from a GP
k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
L = GPy.util.linalg.jitchol(k)
mu = L.dot(np.random.randn(*Y.shape))
#Variance must be positive
var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
expectation = functools.partial(model.variational_expectations, Y=Y, v=var, gh_points=None, Y_metadata=Y_metadata)
#Function to get the nth returned value
def F(mu):
return expectation(m=mu)[0]
def dmu(mu):
return expectation(m=mu)[1]
grad = GradientChecker(F, dmu, mu.copy(), 'm')
grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_dexp_dvar(self, model, Y, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
#Make mu and var (marginal means and variances of q(f)) draws from a GP
k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
L = GPy.util.linalg.jitchol(k)
mu = L.dot(np.random.randn(*Y.shape))
#Variance must be positive
var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
expectation = functools.partial(model.variational_expectations, Y=Y, m=mu, gh_points=None, Y_metadata=Y_metadata)
#Function to get the nth returned value
def F(var):
return expectation(v=var)[0]
def dvar(var):
return expectation(v=var)[2]
grad = GradientChecker(F, dvar, var.copy(), 'v')
self.constrain_positive('v', grad)
#grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
class LaplaceTests(unittest.TestCase):
"""
Specific likelihood tests, not general enough for the above tests
"""
def setUp(self):
np.random.seed(fixed_seed)
self.N = 15
self.D = 1
self.X = np.random.rand(self.N, self.D)*10
self.real_std = 0.1
noise = np.random.randn(*self.X[:, 0].shape)*self.real_std
self.Y = (np.sin(self.X[:, 0]*2*np.pi) + noise)[:, None]
self.f = np.random.rand(self.N, 1)
self.var = 0.2
self.var = np.random.rand(1)
self.stu_t = GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var)
        #TODO: gaussians with non-Identity link. self.gauss = GPy.likelihoods.Gaussian(gp_link=link_functions.Log(), variance=self.var)
self.gauss = GPy.likelihoods.Gaussian(variance=self.var)
#Make a bigger step as lower bound can be quite curved
self.step = 1e-6
def tearDown(self):
self.stu_t = None
self.gauss = None
self.Y = None
self.f = None
self.X = None
def test_gaussian_d2logpdf_df2_2(self):
print("\n{}".format(inspect.stack()[0][3]))
self.Y = None
self.N = 2
self.D = 1
self.X = np.linspace(0, self.D, self.N)[:, None]
self.real_std = 0.2
noise = np.random.randn(*self.X.shape)*self.real_std
self.Y = np.sin(self.X*2*np.pi) + noise
self.f = np.random.rand(self.N, 1)
dlogpdf_df = functools.partial(self.gauss.dlogpdf_df, y=self.Y)
d2logpdf_df2 = functools.partial(self.gauss.d2logpdf_df2, y=self.Y)
grad = GradientChecker(dlogpdf_df, d2logpdf_df2, self.f.copy(), 'g')
grad.randomize()
self.assertTrue(grad.checkgrad(verbose=1))
def test_laplace_log_likelihood(self):
debug = False
real_std = 0.1
initial_var_guess = 0.5
#Start a function, any function
X = np.linspace(0.0, np.pi*2, 100)[:, None]
Y = np.sin(X) + np.random.randn(*X.shape)*real_std
Y = Y/Y.max()
#Yc = Y.copy()
#Yc[75:80] += 1
kernel1 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
#FIXME: Make sure you can copy kernels when params is fixed
#kernel2 = kernel1.copy()
kernel2 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
gauss_distr1 = GPy.likelihoods.Gaussian(variance=initial_var_guess)
exact_inf = GPy.inference.latent_function_inference.ExactGaussianInference()
m1 = GPy.core.GP(X, Y.copy(), kernel=kernel1, likelihood=gauss_distr1, inference_method=exact_inf)
m1['.*white'].constrain_fixed(1e-6)
m1['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10)
m1.randomize()
gauss_distr2 = GPy.likelihoods.Gaussian(variance=initial_var_guess)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m2 = GPy.core.GP(X, Y.copy(), kernel=kernel2, likelihood=gauss_distr2, inference_method=laplace_inf)
m2['.*white'].constrain_fixed(1e-6)
m2['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10)
m2.randomize()
if debug:
print(m1)
print(m2)
optimizer = 'scg'
print("Gaussian")
m1.optimize(optimizer, messages=debug, ipython_notebook=False)
print("Laplace Gaussian")
m2.optimize(optimizer, messages=debug, ipython_notebook=False)
if debug:
print(m1)
print(m2)
m2[:] = m1[:]
#Predict for training points to get posterior mean and variance
post_mean, post_var = m1.predict(X)
post_mean_approx, post_var_approx, = m2.predict(X)
if debug:
from matplotlib import pyplot as pb
pb.figure(5)
pb.title('posterior means')
pb.scatter(X, post_mean, c='g')
pb.scatter(X, post_mean_approx, c='r', marker='x')
pb.figure(6)
pb.title('plot_f')
m1.plot_f(fignum=6)
m2.plot_f(fignum=6)
fig, axes = pb.subplots(2, 1)
            fig.suptitle('Covariance matrices')
a1 = pb.subplot(121)
a1.matshow(m1.likelihood.covariance_matrix)
a2 = pb.subplot(122)
a2.matshow(m2.likelihood.covariance_matrix)
pb.figure(8)
pb.scatter(X, m1.likelihood.Y, c='g')
pb.scatter(X, m2.likelihood.Y, c='r', marker='x')
#Check Y's are the same
np.testing.assert_almost_equal(m1.Y, m2.Y, decimal=5)
#Check marginals are the same
np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)
#Check marginals are the same with random
m1.randomize()
m2[:] = m1[:]
np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)
#Check they are checkgradding
#m1.checkgrad(verbose=1)
#m2.checkgrad(verbose=1)
self.assertTrue(m1.checkgrad(verbose=True))
self.assertTrue(m2.checkgrad(verbose=True))
if __name__ == "__main__":
print("Running unit tests")
unittest.main()
|
mit
|
rhwhite/eventTracking
|
analysis/plot_regress_precipOLR_density.py
|
1
|
11246
|
# coding: utf-8
# In[2]:
import os, errno
import netCDF4
import numpy as np
import datetime as dt
import pandas
import xarray as xr
import Ngl
#import math
from scipy import stats
from scipy.interpolate import griddata
from rhwhitepackages.readwrite import *
from rhwhitepackages.stats import regressmaps
from rhwhitepackages.plots import *
# plotting
# import matplotlib
import xray.plot as xplt
import matplotlib.pyplot as plt
import matplotlib.patches as patches
plotboth = True
pthresh = 0.05
FigDir = '/home/disk/eos4/rachel/Figures/PrecipEvents/'
MinLatF = -40
MaxLatF = 40
MinLonF = 0
MaxLonF = 360
minLats = [ 2, 8, 22,-40]
maxLats = [ 8, 20, 35,-25]
minLons = [200,155,150, 45]
maxLons = [280,200,180, 75]
nboxes = len(minLats)
# In[3]:
#Select lats, lons, bounds, and other parameters
anstartyr = 1999
anendyr = 2014
splittype = 'day'
tbound1 = [0.0,1.0,2.0,5.0]
tbound2 = [1.0,2.0,5.0,100.0]
#tbound1 = [1.0, 1.125, 1.5, 1.875]
#tbound2 = [1.125, 1.25, 1.625, 2.0]
nbounds = len(tbound1)
if len(tbound2) != nbounds:
exit("tbound arrays not equal lengths")
unit = 'day'
if splittype == 'day':
diradd = '/Sizes/'
elif splittype == 'MaxSpeeds':
diradd = '/MaxSpeeds/'
sumlats=0
sumlons=0
minGB=0
mapping = 'center'
#minLat = [-45,-10,8,20,15]; maxLat = [-25,10,15,35,30]; minLon = [185,120,150,140,290]; maxLon = [220,160,220,220,340] #225
minLat = [ 8, 2, 22,-40]
maxLat = [ 20, 8, 35,-25]
minLon = [155,200,150, 45]
maxLon = [200,280,180, 75]
#Read in files
fstartyr = 1998
fendyr = 2014
precipClimDir = "/home/disk/eos4/rachel/Obs/TRMM/"
precipClimFile = "TRMM_3B42_1998-2014_annmean.nc"
precipRegFile = 'regress_TRMM_3B42_1999-2014_annmean.nc'
precipReg2File = 'regress_TRMM_3B42_1998-2014_annmean.nc'
olrDir = '/home/disk/eos4/rachel/Obs/OLR/'
olrRegFile = 'regress_olr_1999-2013.nc'
filePrecip = xrayOpen(precipClimDir + precipClimFile)
filePrecipReg = xrayOpen(precipClimDir + precipRegFile)
filePrecipReg2 = xrayOpen(precipClimDir + precipReg2File)
fileOlrReg = xrayOpen(olrDir + olrRegFile)
# Open events files
iday = 1
TRMMden = getdenfilename(mapping, 'TRMM', 'Standard', 1998, 2014, iday,
splittype, unit, 4, minGB, tbound1, tbound2,
'Mon',sumlats,sumlons)
TRMM1 = geteventmapdata(iday,'Ann','TDensity',TRMMden,
1998, 2014,
MinLatF,MaxLatF,MinLonF,MaxLonF,
['lat','lon'])
# 2-5 day data
iday = 2
TRMMden = getdenfilename(mapping, 'TRMM', 'Standard', 1998, 2014, iday,
splittype, unit, 4, minGB, tbound1, tbound2,
'Mon',sumlats,sumlons)
TRMM2 = geteventmapdata(iday,'Ann','TDensity',TRMMden,
1998, 2014,
MinLatF,MaxLatF,MinLonF,MaxLonF,
['lat','lon'])
# ERA 1-2 day data
iday = 1
ERAden = getdenfilename(mapping, 'ERAI', 'Standard', 1980, 2014, iday,
splittype, unit, 4, minGB, tbound1, tbound2,
'Mon',sumlats,sumlons)
ERA1 = geteventmapdata(iday,'Ann','TDensity',ERAden,
1980, 2014,
MinLatF,MaxLatF,MinLonF,MaxLonF,
['lat','lon'])
# ERA 2-5 day data
iday = 2
ERAden = getdenfilename(mapping, 'ERAI', 'Standard', 1980, 2014, iday,
splittype, unit, 4, minGB, tbound1, tbound2,
'Mon',sumlats,sumlons)
ERA2 = geteventmapdata(iday,'Ann','TDensity',ERAden,
1980, 2014,
MinLatF,MaxLatF,MinLonF,MaxLonF,
['lat','lon'])
# Get precip data
preciplons = filePrecipReg['pcp'].coords['longitude']
preciplats = filePrecipReg['pcp'].coords['latitude']
dataOLR = fileOlrReg['olr']
lonsOLR = fileOlrReg['olr'].coords['lon']
latsOLR = fileOlrReg['olr'].coords['lat']
# need to make these arrays
if np.amax(preciplons) < 190:
olrtemp1 = preciplons.sel(longitude=slice(np.amin(preciplons),0)) + 360.0
olrtemp2 = preciplons.sel(longitude=slice(0,np.amax(preciplons)))
olrnewlons = xr.concat((olrtemp2,olrtemp1),dim='longitude')
else:
olrnewlons = preciplons
print olrnewlons
OLRnew = dataOLR.sel(lat=preciplats,method='nearest')
OLRnew2 = OLRnew.sel(lon=olrnewlons,method='nearest')
# Now mask out filePrecipReg['pcp'] based on where the sign is not opposite to
# OLRnew2
precipclimin = filePrecipReg['pcp']
origlonsin = filePrecipReg['pcp'].coords['longitude']
if np.amax(origlonsin) < 190:
newprecip2 = precipclimin.sel(longitude=slice(np.amin(origlonsin),0))
newprecip2.coords['longitude'] = newprecip2.coords['longitude'] + 360
precipclim = xr.concat((
precipclimin.sel(longitude=slice(0,np.amax(origlonsin)+1)),
newprecip2),
dim='longitude')
signs = OLRnew2[0,:,:].values * precipclim[0,:,:].values
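# signs is negative exactly where the OLR trend and the precipitation trend have
# opposite signs (e.g. precipitation increasing while OLR decreases); only those
# grid cells are kept below, everything else is set to the missing-data value.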
OLRprecip = precipclim.copy(deep=True)
# Change units if necessary
if OLRprecip.long_name == 'precipitation (mm/hr)':
OLRprecip.values = OLRprecip.values * 24.0 * 365.0
OLRprecip['long_name'] = 'precipitation (mm/yr/yr)'
OLRprecip['units'] = 'mm/yr/yr'
else:
exit('i don\'t know the units, and I\'m not going to guess, sorry!')
for ilon in range(0,len(OLRprecip.longitude)):
for ilat in range(0,len(OLRprecip.latitude)):
if signs[ilat,ilon] > 0:
OLRprecip[0,ilat,ilon] = -9999 #set equal to missing data value
OLRprecip = OLRprecip.rename('mm/yr/yr')
plot = []
print OLRprecip
figtitle = FigDir + 'paper_OLR_Precip_Regressions_1-2day_TRMM_ERA'
wkres = Ngl.Resources()
wkres.wkColorMap = "BlueDarkRed18"
wks_type = "eps"
wks = Ngl.open_wks(wks_type,figtitle,wkres)
res1 = Ngl.Resources()
initcontourplot(res1,MinLatF,MinLonF,MaxLatF,MaxLonF,OLRprecip.latitude.values,OLRprecip.longitude.values)
res1.nglMaximize = False
res1.nglDraw = False
res1.nglFrame = False
res1.sfMissingValueV = -9999
# play with labelbar
res1.lbOrientation = "Vertical"
res1.pmLabelBarWidthF = 0.05
res1.lbBoxEndCapStyle = "TriangleBothEnds"
res1.lbBoxMajorExtentF = 1.0
#res1.lbLabelAutoStride = True
res1.lbAutoManage = False
# including some font heights
res1.lbLabelFontHeightF = 0.007
res1.lbTitleFontHeightF = 0.007
res1.lbTitlePosition = "bottom"
res1.lbTitleExtentF = 0.05
res1.lbTopMarginF = 0.01
res1.lbBottomMarginF = 0.01
res1.tiMainFontHeightF = 0.015
# set position
res1.vpWidthF = 0.75
res1.vpHeightF = 0.22
res1.vpXF = .1
res1.vpYF = .95
res1.cnMinLevelValF = -35
res1.cnMaxLevelValF = 35
res1.cnLevelSpacingF = 10
# turn off gridlines
res1.mpGridAndLimbOn = False
print OLRprecip.values.shape
# add label bar title for units
res1.lbTitleOn = True
res1.lbTitleString = "mm/yr~S~2"
res1.tiMainPosition = 'Left'
res1.tiMainOffsetXF = -0.03
res1.tiMainString = 'a.'
plot = (Ngl.contour_map(wks,OLRprecip[0,:,:].values,res1))
# Add boxes
gsres = Ngl.Resources()
gsres.gsLineColor = "Black"
txres = Ngl.Resources()
txres.txFont = "helvetica-bold"
txres.txFontHeightF = 0.012
txres.txJust = "BottomCenter"
for i in range(0,nboxes):
Ngl.add_polyline(wks,plot,[minLons[i],minLons[i]],[minLats[i],maxLats[i]],gsres)
Ngl.add_polyline(wks,plot,[maxLons[i],maxLons[i]],[minLats[i],maxLats[i]],gsres)
Ngl.add_polyline(wks,plot,[minLons[i],maxLons[i]],[minLats[i],minLats[i]],gsres)
Ngl.add_polyline(wks,plot,[minLons[i],maxLons[i]],[maxLats[i],maxLats[i]],gsres)
Ngl.add_text(wks,plot,str(i+1),0.5*(minLons[i] + maxLons[i]),minLats[i] +
0.25 * (maxLats[i] - minLats[i]) ,txres)
Ngl.draw(plot)
# Add other plots
# TRMM line plot
#Now plot plots
res2 = Ngl.Resources()
res2.nglMaximize = False
res2.nglFrame = False
res2.nglDraw = False
res2.xyMarkLineMode = "Markers"
res2.xyMonoMarkLineMode = True
res2.xyMarkerColor = "blue"
res2.xyMarkers = 5
res2.xyMarkerSizeF = 0.005
res2.xyYStyle = "Linear"
res2.xyXStyle = "Linear"
#res2.tiMainFontHeightF = 0.035
#res2.tiYAxisFontHeightF = 0.03
res2.tiXAxisOn = False
#res2.tmXBLabelFontHeightF = 0.03
#res2.tmYLLabelFontHeightF = 0.03
#res2.tmYLMode = "Automatic"
#res2.tmYLFormat = "@6^g"
res2.tiYAxisString = "TRMM"
res2.tiYAxisFontColor = "blue"
# Turn off right hand axis labels
res2.tmYROn = False
res2.tmYRLabelsOn = False
res2.tmYRMinorOn = False
res2.vpWidthF = 0.3
res2.vpHeightF = 0.3
res2.vpXF = .1
res2.vpYF = .65
print TRMM1.year[0].values
res2.trXMinF = ERA1.year[0].values - 1
res2.trXMaxF = ERA1.year[-1].values + 1
res2.tiMainPosition = 'Left'
res2.tiMainOffsetXF = -0.03
res2.tiMainString = 'b.'
plot = (Ngl.xy(wks,TRMM1.year.values,TRMM1.values,res2))
Ngl.add_text(wks,plot,'b',ERA1.year.values[0],Ngl.get_float(plot,"tmYLMajorLengthF"),txres)
# Add 2-5 day data
res22 = Ngl.Resources()
res22.nglDraw = False
res22.nglFrame = False
res22.nglMaximize = False
res22.vpHeightF = res2.vpHeightF
res22.vpWidthF = res2.vpWidthF
res22.vpXF = res2.vpXF
res22.vpYF = res2.vpYF
res22.trXMinF = res2.trXMinF
res22.trXMaxF = res2.trXMaxF
#
# Turn off the bottom and left tickmarks, since we will use the ones from
# the first plot.
#
res22.tmXBOn = False
res22.tmXBLabelsOn = False
res22.tmXBMinorOn = False
res22.tmYLOn = False
res22.tmYLLabelsOn = False
res22.tmYLMinorOn = False
#
# Turn on the right Y labels and tickmarks and move the axis string to
# the right.
#
res22.tmYRLabelsOn = True
res22.tmYROn = True
res22.tmYUseLeft = False
res22.tmYRFormat = "f" # Gets rid of unnecessary trailing zeros
#
# Move the Y axis string to the right.
#
res22.tiYAxisString = "ERA-Interim"
res22.tiYAxisSide = "Right"
res22.tiYAxisFontColor = "red"
res22.tiXAxisFontHeightF = Ngl.get_float(plot,"tiXAxisFontHeightF")
res22.xyMarkerColor = "red"
res22.xyMarkers = '.'
res22.xyMarkLineMode = "Markers"
res22.xyMonoMarkLineMode = True
#
# Make sure the font heights and tickmark lengths are the same as
# the first plot.
#
res22.tmYRLabelFontHeightF = Ngl.get_float(plot,"tmYLLabelFontHeightF")
res22.tmYRMajorLengthF = Ngl.get_float(plot,"tmYLMajorLengthF")
Ngl.draw(plot)
if plotboth: # if want to include 1-2 AND 2-5 day events
plot2 = (Ngl.xy(wks,ERA1.year.values,ERA1.values,res22))
Ngl.draw(plot2)
# Repeat for ERA-I data next to first plot
res2.vpXF = .6
res2.vpYF = .65
res22.vpXF = .6
res22.vpYF = .65
res2.trXMinF = ERA1.year[0].values - 1
res2.trXMaxF = ERA1.year[-1].values + 1
res22.trXMinF = res2.trXMinF
res22.trXMaxF = res2.trXMaxF
res2.tiMainString = 'c.'
plot = (Ngl.xy(wks,TRMM2.year.values,TRMM2.values,res2))
res22.tmYRLabelFontHeightF = Ngl.get_float(plot,"tmYLLabelFontHeightF")
res22.tmYRMajorLengthF = Ngl.get_float(plot,"tmYLMajorLengthF")
A = range(1980,2001+1)
linreg = ERA2.sel(year=slice(1980,2001)).values
print A
print linreg.shape
regress = stats.linregress(A,linreg)
print regress
Ngl.draw(plot)
if plotboth:
plot2 = (Ngl.xy(wks,ERA2.year.values,ERA2.values,res22))
Ngl.draw(plot2)
# Maximize and draw
#psres = True
#Ngl.maximize_plot(wks,plot,psres)
Ngl.frame(wks)
quit()
|
mit
|
mkomeichi/BuildingMLSystemsWithPython
|
ch11/demo_mds.py
|
25
|
3724
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import numpy as np
from matplotlib import pylab
from mpl_toolkits.mplot3d import Axes3D
from sklearn import linear_model, manifold, decomposition, datasets
logistic = linear_model.LogisticRegression()
from utils import CHART_DIR
np.random.seed(3)
# all examples will have three classes in this file
colors = ['r', 'g', 'b']
markers = ['o', 6, '*']
def plot_demo_1():
X = np.c_[np.ones(5), 2 * np.ones(5), 10 * np.ones(5)].T
y = np.array([0, 1, 2])
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 2 dimensions")
filename = "mds_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_iris_mds():
iris = datasets.load_iris()
X = iris.data
y = iris.target
# MDS
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 2 dimensions")
filename = "mds_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
# PCA
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
pca = decomposition.PCA(n_components=3)
Xtrans = pca.fit(X).transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 3 dimensions")
ax.view_init(50, -35)
pca = decomposition.PCA(n_components=2)
Xtrans = pca.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 2 dimensions")
filename = "pca_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_demo_1()
plot_iris_mds()
|
mit
|
centowen/cudaGrid
|
grid.py
|
1
|
2475
|
from __future__ import division
from ctypes import cdll, c_double, c_float, c_int, c_char_p, POINTER
import numpy as np
from math import pi
import matplotlib.pylab as pl
import os
import os.path
from numpyctypes import c_ndarray
def main():
print(os.path.join(__path__[0], 'libgrid.so'))
lib = cdll.LoadLibrary(os.path.join(__path__[0], 'libgrid.so'))
N = 256
# N = 4096
arcsec = 1./180/3600*pi
cell = 0.5*arcsec
x0 = 0.928122246
y0 = -27.638/180*pi
# x0 = 0.
# y0 = 0.
# x0 = 0.9301306097278084
# y0 = -0.4854060361613893
# x0 = 2*arcsec
# x0 = 1.
grid = lib.c_grid
grid.restype = None
# grid.argtype = [c_char_p, POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_double, c_int]
vis_real = np.zeros((6,N,N))
vis_imag = np.zeros((6,N,N))
weight = np.zeros((6,N,N))
pb = np.zeros((6,N,N))
c_vis_real = c_ndarray(vis_real, dtype=np.double, ndim=3)
c_vis_imag = c_ndarray(vis_imag, dtype=np.double, ndim=3)
c_weight = c_ndarray(weight, dtype=np.double, ndim=3)
c_pb = c_ndarray(pb, dtype=np.double, ndim=3)
# grid(c_char_p(b'/data/lindroos/ecdfs_selfcal.ms'), c_vis_real, c_vis_imag, c_weight,
# c_pb, c_double(cell), c_float(x0), c_float(y0), c_int(1))
# grid(c_char_p(b'/data/lindroos/ecdfs_drg_stacked.ms'), c_vis_real, c_vis_imag, c_weight,
# c_pb, c_double(cell), c_float(x0), c_float(y0), c_int(1))
grid(c_char_p(b'/data/lindroos/ecdfs_test.ms'), c_vis_real, c_vis_imag, c_weight,
c_pb, c_double(cell), c_float(x0), c_float(y0), c_int(1))
# grid(c_char_p(b'/data/lindroos/ecdfs_raw_sorted.ms'), c_vis_real, c_vis_imag, c_weight,
# c_pb, c_double(cell), c_float(x0), c_float(y0), c_int(1))
# grid(c_char_p(b'/data/aless_drg.ms'), c_vis_real, c_vis_imag, c_weight,
# c_pb, c_double(cell), c_float(x0), c_float(y0), c_int(0))
# pl.ion()
# pl.imshow(np.real(vis).transpose(), interpolation='nearest', origin='lower')
vis = vis_real+1j*vis_imag
# pl.clf()
# pl.imshow(weight.transpose(), interpolation='nearest', origin='lower')
# pl.imshow(weight.transpose(), interpolation='nearest', origin='lower')
# pl.colorbar()
# pl.savefig('test.png')
# pl.show()
np.save('data.npy', vis)
np.save('weight.npy', weight)
if __name__ == '__main__':
__path__ = [os.path.dirname(os.path.realpath(__file__))]
main()
|
gpl-2.0
|
mxjl620/scikit-learn
|
sklearn/linear_model/tests/test_passive_aggressive.py
|
169
|
8809
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
|
bsd-3-clause
|
timcera/wdmtoolbox
|
tests/test_importdata.py
|
1
|
5732
|
# -*- coding: utf-8 -*-
"""
test_importdata
----------------------------------
Tests for the `wdmtoolbox` module.
"""
import os
import sys
import tempfile
try:
from cStringIO import StringIO
except:
from io import StringIO
from unittest import TestCase
from pandas.testing import assert_frame_equal
from tstoolbox import tstoolbox, tsutils
from wdmtoolbox import wdmtoolbox
from wdmtoolbox.wdmutil import WDMError
def capture(func, *args, **kwds):
sys.stdout = StringIO() # capture output
out = func(*args, **kwds)
out = sys.stdout.getvalue() # release output
try:
out = bytes(out, "utf-8")
except:
pass
return out
class TestDescribe(TestCase):
def setUp(self):
self.fd, self.wdmname = tempfile.mkstemp(suffix=".wdm")
os.close(self.fd)
def tearDown(self):
os.remove(self.wdmname)
def test_tstep(self):
wdmtoolbox.createnewwdm(self.wdmname, overwrite=True)
wdmtoolbox.createnewdsn(self.wdmname, 101, tcode=2, base_year=1970, tsstep=15)
wdmtoolbox.csvtowdm(self.wdmname, 101, input_ts="tests/nwisiv_02246000.csv")
ret1 = wdmtoolbox.extract(self.wdmname, 101)
ret2 = wdmtoolbox.extract("{0},101".format(self.wdmname))
assert_frame_equal(ret1, ret2, check_index_type=False)
ret3 = tstoolbox.read("tests/nwisiv_02246000.csv")
ret3.index = ret3.index.tz_localize(None)
ret3 = tsutils.asbestfreq(ret3)
ret1.columns = ["02246000_iv_00060"]
assert_frame_equal(ret1, ret3, check_index_type=False)
def test_extract_args(self):
wdmtoolbox.createnewwdm(self.wdmname, overwrite=True)
wdmtoolbox.createnewdsn(self.wdmname, 101, tcode=2, base_year=1970, tsstep=15)
wdmtoolbox.csvtowdm(self.wdmname, 101, input_ts="tests/nwisiv_02246000.csv")
with self.assertRaisesRegex(ValueError, "The only allowed keywords are"):
ret1 = wdmtoolbox.extract(self.wdmname, 101, ph=True)
def test_listdsns_verify(self):
wdmtoolbox.createnewwdm(self.wdmname, overwrite=True)
wdmtoolbox.createnewdsn(self.wdmname, 101, tcode=2, base_year=1970, tsstep=15)
wdmtoolbox.csvtowdm(self.wdmname, 101, input_ts="tests/nwisiv_02246000.csv")
ldsns = wdmtoolbox.listdsns(self.wdmname)
def test_negative_dsn(self):
wdmtoolbox.createnewwdm(self.wdmname, overwrite=True)
wdmtoolbox.createnewdsn(self.wdmname, 101, tcode=2, base_year=1970, tsstep=15)
wdmtoolbox.csvtowdm(self.wdmname, 101, input_ts="tests/nwisiv_02246000.csv")
with self.assertRaisesRegex(
WDMError, "WDM error: data set number out of valid range"
):
ret1 = wdmtoolbox.extract(self.wdmname, 0)
def test_out_of_bounds_dsn(self):
wdmtoolbox.createnewwdm(self.wdmname, overwrite=True)
wdmtoolbox.createnewdsn(self.wdmname, 101, tcode=2, base_year=1970, tsstep=15)
wdmtoolbox.csvtowdm(self.wdmname, 101, input_ts="tests/nwisiv_02246000.csv")
with self.assertRaisesRegex(
WDMError, "WDM error: data set number out of valid range"
):
ret1 = wdmtoolbox.extract(self.wdmname, 32001)
def test_dsn_not_in_wdm(self):
wdmtoolbox.createnewwdm(self.wdmname, overwrite=True)
wdmtoolbox.createnewdsn(self.wdmname, 101, tcode=2, base_year=1970, tsstep=15)
wdmtoolbox.csvtowdm(self.wdmname, 101, input_ts="tests/nwisiv_02246000.csv")
with self.assertRaisesRegex(WDMError, "error code -81"):
ret1 = wdmtoolbox.extract(self.wdmname, 32000)
def test_start_date(self):
wdmtoolbox.createnewwdm(self.wdmname, overwrite=True)
wdmtoolbox.createnewdsn(self.wdmname, 101, tcode=2, base_year=1970, tsstep=15)
wdmtoolbox.csvtowdm(self.wdmname, 101, input_ts="tests/nwisiv_02246000.csv")
ret1 = wdmtoolbox.extract(self.wdmname, 101, start_date="2014-02-21 16:00:00")
ret3 = tstoolbox.read(
"tests/nwisiv_02246000.csv", start_date="2014-02-21 16:00:00"
)
ret3.index = ret3.index.tz_localize(None)
ret3 = tsutils.asbestfreq(ret3)
ret1.columns = ["02246000_iv_00060"]
assert_frame_equal(ret1, ret3, check_index_type=False)
def test_end_date(self):
wdmtoolbox.createnewwdm(self.wdmname, overwrite=True)
wdmtoolbox.createnewdsn(self.wdmname, 101, tcode=2, base_year=1970, tsstep=15)
wdmtoolbox.csvtowdm(self.wdmname, 101, input_ts="tests/nwisiv_02246000.csv")
ret1 = wdmtoolbox.extract(self.wdmname, 101, end_date="2014-02-22 11:00:00")
ret3 = tstoolbox.read(
"tests/nwisiv_02246000.csv", end_date="2014-02-22 11:00:00"
)
ret3.index = ret3.index.tz_localize(None)
ret3 = tsutils.asbestfreq(ret3)
ret1.columns = ["02246000_iv_00060"]
assert_frame_equal(ret1, ret3, check_index_type=False)
def test_dates(self):
wdmtoolbox.createnewwdm(self.wdmname, overwrite=True)
wdmtoolbox.createnewdsn(self.wdmname, 101, tcode=2, base_year=1970, tsstep=15)
wdmtoolbox.csvtowdm(self.wdmname, 101, input_ts="tests/nwisiv_02246000.csv")
ret1 = wdmtoolbox.extract(
self.wdmname,
101,
start_date="2014-02-21 16:00:00",
end_date="2014-02-22 11:00:00",
)
ret3 = tstoolbox.read(
"tests/nwisiv_02246000.csv",
start_date="2014-02-21 16:00:00",
end_date="2014-02-22 11:00:00",
)
ret3.index = ret3.index.tz_localize(None)
ret3 = tsutils.asbestfreq(ret3)
ret1.columns = ["02246000_iv_00060"]
assert_frame_equal(ret1, ret3, check_index_type=False)
|
bsd-3-clause
|
shortlab/hognose
|
plotting/enhancement.py
|
1
|
5336
|
# -*- coding: utf-8 -*-
"""
"""
import sys , os, getopt, traceback # First import required modules
import numpy as np # Operating system modules
import matplotlib.pyplot as plt # Numerical python
plt.switch_backend('agg')
import pylab as py
import scipy.optimize # needed for trendline calcs
#plt.ion()
#plt.show()
Kammenzind2016T=np.array([270,310,350])
Kammenzind2016f=np.array([40,30,3.5])
MATPROT=np.array([280,340,400])
MATPROf=np.array([2.5,1.5,1])
Seibold2002T=np.array([310,335])
Seibold2002f=np.array([4,4])
Seibold2002bT=np.array([349.5,350.5])
Seibold2002bf=np.array([3.5,3.5])
Seibold2002fT=np.array([291.7,293.5,296.4,299.3,315.4,317.4,317.4,320.7,322.0])
Seibold2002ff=np.array([15.6,17.7,16.9,16.8,8.9,11.6,8.4,7.4,8.7])
Seibold2002fT2=np.array([325.1,316.0,318.0,318.0,321.1,322.5,326.1,327.7,325.6])
Seibold2002ff2=np.array([5.5,5.5,5.0,4.6,3.6,3.6,3.1,3.8,4.8])
Seibold2002fT3=np.array([329.0,319.7])
Seibold2002ff3=np.array([4.8,3.5])
fig = plt.figure(figsize = (13,8))
axe = fig.add_subplot(111)
axe.tick_params(labelsize=18)
axe.plot(Kammenzind2016T,Kammenzind2016f,'>',markersize=12,label='Kammenzind et al. 2016')
axe.plot(MATPROT,MATPROf,'<',markersize=12,label='MATPRO using '+r'$T_{clad,surface}$')
#axe.plot(Seibold2002T,Seibold2002f,'r-',linewidth=5,label='Seibold et al. 2002')
#axe.plot(Seibold2002bT,Seibold2002bf,'r-',linewidth=5)
axe.plot(Seibold2002fT,Seibold2002ff,'ro',markersize=8,label='Seibold et al. 2002')
axe.plot(Seibold2002fT2,Seibold2002ff2,'ro',markersize=8)
axe.plot(Seibold2002fT3,Seibold2002ff3,'ro',markersize=8)
#axe.plot(time4[0::1],Oxide_thickness4[0::1],'-s',label='value = 0.8')
axe.legend(loc='best') #,ncol=2)
plt.xlim(265,405)
plt.ylim(0,87.5)#41)
plt.ylabel('Reactor Corrosion Rate / Autoclave Corrosion Rate',fontsize=20)
plt.xlabel('Temperature ('+r'$^\circ$'+'C)',fontsize=20)
plt.savefig('fig-enhancement.png',bbox_inches='tight')
HOGNOSEt=np.array([270,290,310,330])
HOGNOSEf=np.array([84.9,42.1,5.5,2.1])
HOGNOSEon=1
if HOGNOSEon==True:
fig = plt.figure(figsize = (13,10))
axe = fig.add_subplot(111)
axe.tick_params(labelsize=28)
fig.subplots_adjust(left = 0.11,top=0.97,right = 0.96)
axe.plot(Kammenzind2016T,Kammenzind2016f,'>',markersize=18,label='Kammenzind et al. 2016')
axe.plot(MATPROT,MATPROf,'<',markersize=18,label='MATPRO using '+r'$T_{clad,surface}$')
axe.plot(Seibold2002fT,Seibold2002ff,'ro',markersize=18,label='Seibold et al. 2002')
axe.plot(Seibold2002fT2,Seibold2002ff2,'ro',markersize=18)
axe.plot(Seibold2002fT3,Seibold2002ff3,'ro',markersize=18)
axe.plot(HOGNOSEt,HOGNOSEf,'k*-',markersize=28,linewidth=6,label='HOGNOSE results at '+r'$\phi = 1.0 \cdot 10^{14} n/cm^2-s$')
axe.xaxis.set_tick_params(length=8,width=2)
axe.yaxis.set_tick_params(length=8,width=2)
for axis in ['top','bottom','left','right']:
axe.spines[axis].set_linewidth(2)
axe.legend(loc='best',fontsize=25) #,ncol=2)
plt.xlim(265,352)
plt.ylim(0,87.5)
plt.ylabel('Enhancement Factor',fontsize=35)
plt.xlabel('Temperature ('+r'$^\circ$'+'C)',fontsize=35)
plt.savefig('fig-HOGNOSE-enhancement.png',dpi=500,bbox_inches='tight')
aftertransition=10
if aftertransition==True:
f270phi=np.array([0.25,0.4,0.7,1.0,1.6])
f270f=np.array([8.5,31.2,81.4,84.9,84.9])
f290phi=np.array([0.25,0.5,0.7,1.0,1.6])
f290f=np.array([2.6,6.5,12.5,42.1,74.3])
f310phi=np.array([0.25,0.5,1.0,1.6])
f310f=np.array([1.4,2.3,5.5,14.4])
f330phi=np.array([0.25,0.5,1.0,1.6])
f330f=np.array([1.1,1.3,2.1,3.8])
else:
f270phi=np.array([0,0.25,0.4,0.7,1.0,1.6])
f270f=np.array([1,4.4,6.9,8.9,9.6,10.3])
f290phi=np.array([0,0.25,0.5,0.7,1.0,1.6])
f290f=np.array([1,2.1,3.7,5.0,7.0,8.2])
f310phi=np.array([0,0.25,0.5,1.0,1.6])
f310f=np.array([1,1.3,1.9,3.3,5.1])
f330phi=np.array([0,0.25,0.5,1.0,1.6])
f330f=np.array([1,1.1,1.3,1.8,2.6])
HOGNOSEonly=1
if HOGNOSEonly==True:
fig = plt.figure(figsize = (13,10))
axe = fig.add_subplot(111)
axe.tick_params(labelsize=28)
fig.subplots_adjust(left = 0.11,top=0.97,right = 0.96)
axe.plot(f270phi,f270f,'ro-',linewidth=4,markersize=18,label='270'+r'$^\circ$'+'C')
axe.plot(f290phi,f290f,'b>-',linewidth=4,markersize=18,label='290'+r'$^\circ$'+'C')
axe.plot(f310phi,f310f,'g<-',linewidth=4,markersize=18,label='310'+r'$^\circ$'+'C')
axe.plot(f330phi,f330f,'k*-',linewidth=4,markersize=28,label='330'+r'$^\circ$'+'C')
axe.legend(loc='best',fontsize=25) #,ncol=2)
axe.xaxis.set_tick_params(length=8,width=2)
axe.yaxis.set_tick_params(length=8,width=2)
for axis in ['top','bottom','left','right']:
axe.spines[axis].set_linewidth(2)
plt.xlim(0,1.75)
axe.xaxis.set_ticks(np.arange(0,1.75,0.25))
if aftertransition==True:
plt.ylim(0,87.5)
else:
plt.ylim(1,11)
axe.yaxis.set_ticks(np.arange(1,11, 1))
plt.ylabel('Enhancement Factor',fontsize=35)
plt.xlabel('Neutron Flux '+r'$\phi $'+' (in multiples of '+r'$10^{14} n/cm^2-s$)',fontsize=35)
plt.savefig('fig-overallHOGNOSE-enhancement.png',dpi=500,bbox_inches='tight')
|
lgpl-2.1
|
KNMI/VERCE
|
verce-hpc-pe/src/test/misfit_preprocessing/misfit_preprocess_new.py
|
2
|
9232
|
# Before executing the workflow, make sure the data paths in the
# misfit_input.jsn file are correct.
# Run the workflow with the following command:
# python -m dispel4py.new.processor simple
# dispel4py/test/seismo/misfit_preprocess.py -f
# dispel4py/test/seismo/misfit_input.jsn
import glob
import inspect
import json
import os
import sys
import socket
import traceback  # used by plot_stream() to report exceptions
import numpy as np
import obspy
# from dispel4py.seismo.seismo import *
from obspy.core.event import readEvents, ResourceIdentifier
from obspy.signal.invsim import c_sac_taper
from obspy.signal.util import _npts2nfft
import scipy.signal
import matplotlib.pyplot as plt
import matplotlib.dates as mdt
from dispel4py.core import GenericPE
from dispel4py.base import IterativePE, ConsumerPE, create_iterative_chain
import gc
# from dispel4py.workflow_graph import WorkflowGraph
def get_event_time(event):
"""
Extract origin time from event XML file.
"""
if not isinstance(event, obspy.core.event.Event):
event = obspy.readEvents(event)[0]
origin = event.preferred_origin() or event.origins[0]
return origin.time
def get_synthetics(synts, event_time):
if isinstance(synts, list):
file_names = synts
else:
file_names = glob.glob(synts)
st = obspy.Stream()
for name in file_names:
st += obspy.read(name)
# The start time of the synthetics might not be absolute. Grant a tolerance
# of 10 seconds.
if -10.0 <= st[0].stats.starttime.timestamp <= 0.0:
for tr in st:
offset = tr.stats.starttime - obspy.UTCDateTime(0)
tr.stats.starttime = event_time + offset
return st
def read_stream(data_files, sxml, event_file, event_id):
print data_files
stream = obspy.read(data_files)
stations = obspy.read_inventory(sxml, format="STATIONXML")
stream.attach_response(stations)
events = readEvents(event_file)
event = None
resource_id = ResourceIdentifier(event_id)
for evt in events:
if evt.resource_id == resource_id:
event = evt
if event is None:
event = events[0]
return stream, stations, event
def get_event_coordinates(event):
if not isinstance(event, obspy.core.event.Event):
event = obspy.readEvents(event)[0]
origin = event.preferred_origin() or event.origins[0]
return origin.longitude, origin.latitude
def zerophase_chebychev_lowpass_filter(trace, freqmax):
"""
Custom Chebychev type two zerophase lowpass filter useful for
decimation filtering.
This filter is stable up to a reduction in frequency with a factor of
10. If more reduction is desired, simply decimate in steps.
Partly based on a filter in ObsPy.
:param trace: The trace to be filtered.
:param freqmax: The desired lowpass frequency.
Will be replaced once ObsPy has a proper decimation filter.
"""
# rp - maximum ripple of passband, rs - attenuation of stopband
rp, rs, order = 1, 96, 1e99
ws = freqmax / (trace.stats.sampling_rate * 0.5) # stop band frequency
wp = ws # pass band frequency
while True:
if order <= 12:
break
wp *= 0.99
order, wn = scipy.signal.cheb2ord(wp, ws, rp, rs, analog=0)
b, a = scipy.signal.cheby2(order, rs, wn, btype="low", analog=0,
output="ba")
# Apply twice to get rid of the phase distortion.
trace.data = scipy.signal.filtfilt(b, a, trace.data)
def aliasing_filter(tr, target_sampling_rate):
while True:
decimation_factor = int(1.0 / target_sampling_rate / tr.stats.delta)
# Decimate in steps for large sample rate reductions.
if decimation_factor > 8:
decimation_factor = 8
if decimation_factor > 1:
new_nyquist = tr.stats.sampling_rate / 2.0 / float(
decimation_factor)
zerophase_chebychev_lowpass_filter(tr, new_nyquist)
tr.decimate(factor=decimation_factor, no_filter=True)
else:
break
def sync_cut(data, synth, lenwin=None):
"""
return cutted copy of data and synth
:param data: Multicomponent stream of data.
:param synth: Multicomponent stream of synthetics.
:param sampling_rate: Desired sampling rate.
"""
sampling_rate = min([tr.stats.sampling_rate for tr in (data + synth)])
for tr in data:
aliasing_filter(tr, sampling_rate)
for tr in synth:
aliasing_filter(tr, sampling_rate)
starttime = max([tr.stats.starttime for tr in (data + synth)])
endtime = min([tr.stats.endtime for tr in (data + synth)])
if lenwin:
if (endtime - starttime) < lenwin:
raise ValueError("lenwin is larger than the data allows.")
endtime = starttime + float(lenwin)
npts = int((endtime - starttime) * sampling_rate)
data.interpolate(sampling_rate=sampling_rate, method="cubic",
starttime=starttime, npts=npts)
synth.interpolate(sampling_rate=sampling_rate, method="cubic",
starttime=starttime, npts=npts)
return data, synth
def rotate_data(stream, stations, event):
"""
Rotates the data to ZRT.
"""
n = stream.select(component='N')
e = stream.select(component='E')
stations = stations.select(network=stream[0].stats.network,
station=stream[0].stats.station)
if len(e) and len(n):
# print "COORD:"+str(get_event_coordinates(event))
lon_event, lat_event = get_event_coordinates(event)
lon_station, lat_station = stations[0][0].longitude, \
stations[0][0].latitude
dist, az, baz = obspy.core.util.geodetics.base.gps2DistAzimuth(
float(lat_event), float(lon_event), float(lat_station),
float(lon_station))
stream.rotate('NE->RT', baz)
else:
raise ValueError("Could not rotate data")
return stream
def remove_response(stream, pre_filt=(0.01, 0.02, 8.0, 10.0),
response_output="DISP"):
"""
Removes the instrument response.
Assumes stream.attach_response has been called before.
"""
stream.remove_response(pre_filt=pre_filt,
output=response_output,
zero_mean=False, taper=False)
return stream
def pre_filter(stream, pre_filt=(0.02, 0.05, 8.0, 10.0)):
"""
Applies the same filter as remove_response without actually removing the
response.
"""
for tr in stream:
data = tr.data.astype(np.float64)
nfft = _npts2nfft(len(data))
fy = 1.0 / (tr.stats.delta * 2.0)
freqs = np.linspace(0, fy, nfft // 2 + 1)
# Transform data to Frequency domain
data = np.fft.rfft(data, n=nfft)
data *= c_sac_taper(freqs, flimit=pre_filt)
data[-1] = abs(data[-1]) + 0.0j
# transform data back into the time domain
data = np.fft.irfft(data)[0:tr.stats.npts]
# assign processed data and store processing information
tr.data = data
return stream
def detrend(stream, method='linear'):
stream.detrend(method)
return stream
def taper(stream, max_percentage=0.05, taper_type="hann"):
stream.taper(max_percentage=max_percentage, type=taper_type)
return stream
def filter_lowpass(stream, frequency, corners, zerophase):
stream.filter("lowpass", freq=frequency, corners=corners,
zerophase=zerophase)
return stream
def filter_highpass(stream, frequency, corners, zerophase):
stream.filter("highpass", freq=frequency, corners=corners,
zerophase=zerophase)
return stream
def filter_bandpass(stream, min_frequency, max_frequency, corners, zerophase):
stream.filter("bandpass", freqmin=min_frequency,
freqmax=max_frequency, corners=corners,
zerophase=zerophase)
return stream
def plot_stream(stream, output_dir, source, tag, seq_idx=0):
try:
stats = stream[0].stats
filename = source + "-%s.%s.%s.%s.png" % (stats['network'], stats['station'], tag, seq_idx)
path = os.environ['STAGED_DATA'] + '/' + output_dir
if not os.path.exists(path):
try:
os.makedirs(path)
except:
pass
dest = os.path.join(path, filename)
stream.plot(outfile=dest)
prov = {'location': "file://" + socket.gethostname() + "/" + dest, 'format': 'image/png',
'metadata': {'prov:type': tag, 'source': source}}
return stream, prov
except:
traceback.print_exc()
def store_stream(stream, output_dir, source, tag, seq_idx=0):
stats = stream[0].stats
filename = source + "-%s.%s.%s.%s.seed" % (
stats['network'], stats['station'], tag, seq_idx)
path = os.environ['STAGED_DATA'] + '/' + output_dir
if not os.path.exists(path):
os.makedirs(path)
dest = os.path.join(path, filename)
stream.write(dest, format='MSEED')
prov = {'location': "file://" + socket.gethostname() + "/" + dest, 'format': 'application/octet-stream',
'metadata': {'prov:type': tag}}
return stream, prov
|
mit
|
shicai/cuda-convnet2
|
shownet.py
|
180
|
18206
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
from StringIO import StringIO  # used in plot_predictions; imported here in case python_util.util does not provide it
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
|
apache-2.0
|
gladk/trunk
|
doc/sphinx/ipython_directive013.py
|
8
|
27280
|
# -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
From: https://github.com/ipython/ipython/blob/master/docs/sphinxext/ipython_directive.py
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import os
import re
import sys
import tempfile
import ast
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
            # we have a '\' line continuation char or a function call
            # that echoes output (e.g. 'print'). The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
        content is a list of strings. It is the unedited directive content.
        This runs it line by line in the InteractiveShell, prepending
        prompts as needed and capturing stderr and stdout, then returns
        the content as a list as if it were ipython code.
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
seen_docs = [i for i in os.listdir(tempfile.tempdir)
if i.startswith('seen_doc')]
if seen_docs:
fname = os.path.join(tempfile.tempdir, seen_docs[0])
docs = open(fname).read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
fout = open(fname, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
self.teardown()
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
    app.add_config_value('ipython_rgxin',
                         re.compile(r'In \[(\d+)\]:\s?(.*)\s*'), True)
    app.add_config_value('ipython_rgxout',
                         re.compile(r'Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
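# A minimal sketch of how a project might tune these settings from its Sphinx
# conf.py once the extension is enabled; the module name in `extensions` and
# the particular values shown are illustrative assumptions, only the config
# keys themselves come from the registrations above.
#
#   extensions = ['ipython_directive']
#   ipython_savefig_dir = '_static'
#   ipython_promptin = 'In [%d]:'
#   ipython_promptout = 'Out[%d]:'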
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
        IpythonDirective('debug', arguments=None, options=options,
                         content=content, lineno=0,
                         content_offset=None, block_text=None,
                         state=None, state_machine=None,
                         )
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print 'All OK? Check figures in _static/'
|
gpl-2.0
|
msarahan/bokeh
|
tests/compat/lc_offsets.py
|
13
|
1127
|
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
# Simulate a series of ocean current profiles, successively
# offset by 0.1 m/s so that they form what is sometimes called
# a "waterfall" plot or a "stagger" plot.
nverts = 60
ncurves = 20
offs = (0.1, 0.0)
rs = np.random.RandomState([12345678])
yy = np.linspace(0, 2 * np.pi, nverts)
ym = np.amax(yy)
xx = (0.2 + (ym - yy) / ym) ** 2 * np.cos(yy - 0.4) * 0.5
segs = []
for i in range(ncurves):
xxx = xx + 0.02 * rs.randn(nverts)
curve = list(zip(xxx, yy * 100))
segs.append(curve)
colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0),
(0.0, 0.75, 0.75, 1.0), (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0),
(0.0, 0.0, 0.0, 1.0)]
col = LineCollection(segs, linewidth=5, offsets=offs)
ax = plt.axes()
ax.add_collection(col, autolim=True)
col.set_color(colors)
ax.set_title('Successive data offsets')
fig = plt.gcf()
output_file("lc_offsets.html", title="lc_offsets.py example")
show(mpl.to_bokeh())
|
bsd-3-clause
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/sklearn/neural_network/tests/test_rbm.py
|
225
|
6278
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
|
mit
|
sjakthol/dedup-simulator
|
simulator/smote.py
|
1
|
4959
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2012-2013 Karsten Jeschkies <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
'''
Created on 24.11.2012
@author: karsten jeschkies <[email protected]>
This is an implementation of the SMOTE Algorithm.
See: "SMOTE: synthetic minority over-sampling technique" by
Chawla, N.V et al.
---
Modified on 5.3.2016
@author: Sami Jaktholm <[email protected]>
Added python3 support and silenced deprecation warning caused by newer scikit.
'''
import logging
import numpy as np
from random import randrange, choice
from sklearn.neighbors import NearestNeighbors
logger = logging.getLogger("main")
def SMOTE(T, N, k, h = 1.0):
"""
Returns (N/100) * n_minority_samples synthetic minority samples.
Parameters
----------
T : array-like, shape = [n_minority_samples, n_features]
Holds the minority samples
    N : percentage of new synthetic samples:
n_synthetic_samples = N/100 * n_minority_samples. Can be < 100.
k : int. Number of nearest neighbours.
Returns
-------
S : Synthetic samples. array,
shape = [(N/100) * n_minority_samples, n_features].
"""
n_minority_samples, n_features = T.shape
    if N < 100:
        #create synthetic samples only for a subset of T.
        #TODO: select random minority samples
        N = 100
if (N % 100) != 0:
raise ValueError("N must be < 100 or multiple of 100")
N = int(N/100)
n_synthetic_samples = N * n_minority_samples
S = np.zeros(shape=(n_synthetic_samples, n_features))
#Learn nearest neighbours
neigh = NearestNeighbors(n_neighbors = k)
neigh.fit(T)
#Calculate synthetic samples
for i in range(n_minority_samples):
nn = neigh.kneighbors([T[i]], return_distance=False)
for n in range(N):
nn_index = choice(nn[0])
#NOTE: nn includes T[i], we don't want to select it
while nn_index == i:
nn_index = choice(nn[0])
dif = T[nn_index] - T[i]
gap = np.random.uniform(low = 0.0, high = h)
S[n + i * N, :] = T[i,:] + gap * dif[:]
return S
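# A minimal usage sketch (the array below is made up for illustration):
# oversample a small minority class by 200% using 3 nearest neighbours,
# which yields 2 * 20 = 40 synthetic rows.
#
#   minority = np.random.rand(20, 5)
#   synthetic = SMOTE(minority, N=200, k=3)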
def borderlineSMOTE(X, y, minority_target, N, k):
"""
Returns synthetic minority samples.
Parameters
----------
    X : array-like, shape = [n_samples, n_features]
        Holds the minority and majority samples
    y : array-like, shape = [n_samples]
Holds the class targets for samples
minority_target : value for minority class
    N : percentage of new synthetic samples:
n_synthetic_samples = N/100 * n_minority_samples. Can be < 100.
k : int. Number of nearest neighbours.
    h : high in random.uniform used to scale the difference of the synthetic sample (fixed to 0.5 internally)
Returns
-------
safe : Safe minorities
synthetic : Synthetic sample of minorities in danger zone
danger : Minorities of danger zone
"""
n_samples, _ = X.shape
#Learn nearest neighbours on complete training set
neigh = NearestNeighbors(n_neighbors = k)
neigh.fit(X)
safe_minority_indices = list()
danger_minority_indices = list()
    for i in range(n_samples):
if y[i] != minority_target: continue
        nn = neigh.kneighbors([X[i]], return_distance=False)
majority_neighbours = 0
for n in nn[0]:
if y[n] != minority_target:
majority_neighbours += 1
        if majority_neighbours == len(nn[0]):
            continue
        elif majority_neighbours < (len(nn[0])/2):
logger.debug("Add sample to safe minorities.")
safe_minority_indices.append(i)
else:
#DANGER zone
danger_minority_indices.append(i)
#SMOTE danger minority samples
synthetic_samples = SMOTE(X[danger_minority_indices], N, k, h = 0.5)
return (X[safe_minority_indices],
synthetic_samples,
X[danger_minority_indices])
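# A minimal usage sketch (hypothetical, overlapping two-class data so that a
# danger zone actually exists); the three returned arrays follow the docstring
# above: safe minorities, synthetic samples, and minorities in the danger zone.
#
#   X = np.vstack([np.random.rand(90, 4), np.random.rand(10, 4)])
#   y = np.array([0] * 90 + [1] * 10)
#   safe, synthetic, danger = borderlineSMOTE(X, y, minority_target=1, N=200, k=5)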
|
apache-2.0
|
luminwin/beast-mcmc
|
doc/tutorial/EBSP/scripts/popGraphFromCSV.py
|
10
|
4725
|
#!/usr/bin/env python
import sys, os.path, math, fnmatch
from glob import glob
import optparse
from popGraphUtil import plotFromCSV, plotFromAll
parser = optparse.OptionParser(" [options] csv-file chart-file")
parser.add_option("", "--xlim", dest="xlim", help="cut off X-axis at this point", default = None)
parser.add_option("", "--ylim", dest="ylim", help="cut off Y-axis at this point", default = None)
parser.add_option("", "--logy", dest="logy", action="store_true",
help="Log scale for Y axis", default = False)
parser.add_option("", "--yscale", dest="yscale", help="Y-axis scale factor", default = 1)
parser.add_option("", "--width", dest="width",
help="figure width. Integral value with units: 50mm 2cm 3 (inches)", default = None)
# parser.add_option("", "--ms", dest="ms", help="", default = None)
parser.add_option("", "--lw", dest="lw", help="Line width", default = None)
parser.add_option("", "--font", dest="font", help="name of font for figure text ", default = None)
parser.add_option("", "--fontsize", dest="fontsize", help="font size of figure text", default = None)
# parser.add_option("", "--axes", dest="axesSize", help="", default = None)
parser.add_option("", "--ticks", dest="ticklabelsize",
help="font size of ticks labels ", default = None)
parser.add_option("", "--nxticks", dest="nxticks",
help="number of X-axis ticks", default = None)
parser.add_option("", "--title", dest="title",
help="Figure title", default = None)
parser.add_option("", "--hist", dest="hist", action="store_true",help="", default = False)
parser.add_option("", "--alldemo", dest="alldfile",
help="plot all demographic functions in this file",
default = None)
parser.add_option("-a", "--alphaout", dest="alpha", help="transparancy value of outline.", default = 1)
parser.add_option("", "--alpha", dest="alldalpha",
help="transparancy value to use when plotting all" +
" demographic. 1 - no transparancy, 0 fully transparent.", default = 0.1)
parser.add_option("", "--ratio", dest="ratio",
help="height/width ratio of figure.", default = 0.75)
options, args = parser.parse_args()
if len(args) != 2 :
print >> sys.stderr, "usage:", sys.argv[0], "csv-file", "chart-file"
sys.exit(1)
name = args[0]
trueDemo = None
plotOptionsDict = { 'alpha' : float(options.alpha),
'logy' : options.logy,
'doHist': options.hist }
if options.lw :
plotOptionsDict['mainlw'] = float(options.lw)
plotOptionsDict['hpdOutline'] = float(options.lw)/2
labelsFont = None
if options.font :
import matplotlib.font_manager
labelsFont = matplotlib.font_manager.FontProperties(options.font)
if labelsFont.get_name() != options.font :
print >> sys.stderr, "*warning:", labelsFont.get_name(),"!=",options.font
if options.fontsize :
labelsFont.set_size(float(options.fontsize))
import pylab
def convertToInches(w) :
if w[-2:] == 'mm' :
return int(w[:-2]) / 25.4
if w[-2:] == 'cm' :
return int(w[:-2]) / 2.54
return int(w)
if options.width is None :
fig = pylab.figure()
else :
w = convertToInches(options.width)
h = w * float(options.ratio)
fig = pylab.figure(figsize=(w,h))
if labelsFont :
labelFontDict = {'fontproperties': labelsFont}
plotOptionsDict['labelProps'] = labelFontDict
if options.alldfile:
pylab.ioff()
plotFromAll(options.alldfile, yScale = float(options.yscale),
logy = options.logy, alpha = float(options.alldalpha))
plotFromCSV(name, trueDemo, yScale = float(options.yscale), **plotOptionsDict)
if options.xlim :
pylab.xlim((0, float(options.xlim)))
if options.ylim :
pylab.ylim((0, float(options.ylim)))
if options.title :
pylab.title(options.title)
pylab.legend(loc='best')
if options.nxticks :
from matplotlib.ticker import MaxNLocator
pylab.gca().xaxis.set_major_locator(MaxNLocator(int(options.nxticks)))
if labelsFont :
ltext = pylab.gca().get_legend().get_texts()
for l in ltext :
pylab.setp(l, fontproperties = labelsFont)
if options.ticklabelsize :
s = float(options.ticklabelsize)
if labelsFont :
fp = matplotlib.font_manager.FontProperties(labelsFont.get_name())
fp.set_size(s)
fp = {'fontproperties' : fp}
else :
fp = dict()
for p in ('xticklabels', 'yticklabels') :
l = pylab.getp(pylab.gca(), p)
pylab.setp(l, fontsize=s, **fp)
if options.alldfile:
pylab.ion()
pylab.savefig(args[1], dpi=300)
|
lgpl-2.1
|
harisbal/pandas
|
pandas/tests/reshape/test_reshape.py
|
4
|
25298
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from collections import OrderedDict
from pandas import DataFrame, Series
from pandas.core.sparse.api import SparseDtype, SparseArray
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import assert_frame_equal
from pandas import get_dummies, Categorical, Index
import pandas.util.testing as tm
from pandas.compat import u
class TestGetDummies(object):
@pytest.fixture
def df(self):
return DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
@pytest.fixture(params=['uint8', 'i8', np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=['dense', 'sparse'])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == 'sparse'
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype='object')
def test_basic(self, sparse, dtype):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype))
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
if sparse:
tm.assert_sp_frame_equal(result,
expected.to_sparse(kind='integer',
fill_value=0))
else:
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
if sparse:
expected = expected.to_sparse(kind='integer', fill_value=0)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
if sparse:
expected.to_sparse(kind='integer', fill_value=0)
assert_frame_equal(result, expected)
def test_basic_types(self, sparse, dtype):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list('abc'))
if not sparse:
compare = tm.assert_frame_equal
else:
expected = expected.to_sparse(fill_value=0, kind='integer')
compare = tm.assert_sp_frame_equal
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
compare(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
compare(result, expected)
result = get_dummies(s_df, columns=s_df.columns,
sparse=sparse, dtype=dtype)
if sparse:
dtype_name = 'Sparse[{}, 0]'.format(
self.effective_dtype(dtype).name
)
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
tm.assert_series_equal(result.get_dtype_counts(), expected)
result = get_dummies(s_df, columns=['a'], sparse=sparse, dtype=dtype)
expected_counts = {'int64': 1, 'object': 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
tm.assert_series_equal(result.get_dtype_counts().sort_index(),
expected)
def test_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=['A'])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ['A']
def test_include_na(self, sparse, dtype):
if sparse:
pytest.xfail(reason='nan in index is problematic (GH 16894)')
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame({nan: [0, 0, 1],
'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
exp_na = exp_na.reindex(['a', 'b', nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True,
sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
dtype=self.effective_dtype(dtype))
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=sparse)
exp = DataFrame({'letter_e': [1, 0, 0],
u('letter_%s') % eacute: [0, 1, 1]},
dtype=np.uint8)
if sparse:
tm.assert_sp_frame_equal(res, exp.to_sparse(fill_value=0,
kind='integer'))
else:
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, sparse=sparse)
expected = DataFrame({'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = pd.SparseDataFrame({
"A_a": pd.SparseArray([1, 0, 1], dtype='uint8'),
"A_b": pd.SparseArray([0, 1, 0], dtype='uint8'),
"B_b": pd.SparseArray([1, 1, 0], dtype='uint8'),
"B_c": pd.SparseArray([0, 0, 1], dtype='uint8'),
})
tm.assert_sp_frame_equal(result, expected)
else:
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ)})
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ['from_A', 'from_B']
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
cols = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected = expected[['C'] + cols]
typ = pd.SparseArray if sparse else pd.Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix='bad', sparse=sparse)
bad_columns = ['bad_a', 'bad_b', 'bad_b', 'bad_c']
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['C'] + bad_columns,
dtype=np.uint8)
expected = expected.astype({"C": np.int64})
if sparse:
# work around astyping & assigning with duplicate columns
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat([
pd.Series([1, 2, 3], name='C'),
pd.Series([1, 0, 1], name='bad_a', dtype='Sparse[uint8]'),
pd.Series([0, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([1, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([0, 0, 1], name='bad_c', dtype='Sparse[uint8]'),
], axis=1)
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=['from_A'], columns=['A'],
sparse=sparse)
expected = DataFrame({'B': ['b', 'b', 'c'],
'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0]}, dtype=np.uint8)
expected[['C']] = df[['C']]
if sparse:
cols = ['from_A_a', 'from_A_b']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep='..', sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A..a': [1, 0, 1],
'A..b': [0, 1, 0],
'B..b': [1, 1, 0],
'B..c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
if sparse:
cols = ['A..a', 'A..b', 'B..b', 'B..c']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..', 'B': '__'},
sparse=sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=['too few'], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=['bad'], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'C': [1, 2, 3],
'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c']})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]})
columns = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
expected[columns] = expected[columns].apply(
lambda x: pd.SparseSeries(x)
)
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True,
sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_a': arr([1, 0, 1, 0], dtype=typ),
'A_b': arr([0, 1, 0, 0], dtype=typ),
'A_nan': arr([0, 0, 0, 1], dtype=typ),
'B_b': arr([1, 1, 0, 0], dtype=typ),
'B_c': arr([0, 0, 1, 0], dtype=typ),
'B_nan': arr([0, 0, 0, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ),
'cat_x': arr([1, 0, 0], dtype=typ),
'cat_y': arr([0, 1, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('get_dummies_kwargs,expected', [
({'data': pd.DataFrame(({u'ä': ['a']}))},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'ä']})},
pd.DataFrame({u'x_ä': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix':u'ä'},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix_sep':u'ä'},
pd.DataFrame({u'xäa': [1]}, dtype=np.uint8))])
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
# GH22084 pd.get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
assert_frame_equal(result, expected)
def test_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
if sparse:
expected = expected.to_sparse(fill_value=0, kind='integer')
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_one_level(self, sparse):
        # Test the case where the categorical variable has only one level.
s_list = list('aaa')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected = DataFrame(index=list('ABC'))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ['a', 'b', np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
exp = DataFrame({'b': [0, 1, 0]}, dtype=np.uint8)
if sparse:
exp = exp.to_sparse(fill_value=0, kind='integer')
assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True,
sparse=sparse)
exp_na = DataFrame(
{'b': [0, 1, 0],
nan: [0, 0, 1]},
dtype=np.uint8).reindex(['b', nan], axis=1)
if sparse:
exp_na = exp_na.to_sparse(fill_value=0, kind='integer')
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, drop_first=True,
sparse=sparse)
exp_just_na = DataFrame(index=np.arange(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'A_b': [0, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = expected.to_sparse(fill_value=0, kind='integer')
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(
self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_b': [0, 1, 0],
'B_c': [0, 0, 1],
'cat_y': [0, 1, 1]})
cols = ['A_b', 'B_c', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, drop_first=True,
sparse=sparse).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected.sort_index(axis=1)
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, drop_first=True,
sparse=sparse)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
def test_int_int(self):
data = Series([1, 2, 1])
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=[1, 2],
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(pd.Categorical(['a', 'b', 'a']))
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=pd.Categorical(['a', 'b']),
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
def test_int_df(self, dtype):
data = DataFrame(
{'A': [1, 2, 1],
'B': pd.Categorical(['a', 'b', 'a']),
'C': [1, 2, 1],
'D': [1., 2., 1.]
}
)
columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
expected = DataFrame([
[1, 1., 1, 0, 1, 0],
[2, 2., 0, 1, 0, 1],
[1, 1., 1, 0, 1, 0]
], columns=columns)
expected[columns[2:]] = expected[columns[2:]].astype(dtype)
result = pd.get_dummies(data, columns=['A', 'B'], dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_preserve_categorical_dtype(self, dtype):
# GH13854
for ordered in [False, True]:
cat = pd.Categorical(list("xy"), categories=list("xyz"),
ordered=ordered)
result = get_dummies(cat, dtype=dtype)
data = np.array([[1, 0, 0], [0, 1, 0]],
dtype=self.effective_dtype(dtype))
cols = pd.CategoricalIndex(cat.categories,
categories=cat.categories,
ordered=ordered)
expected = DataFrame(data, columns=cols,
dtype=self.effective_dtype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('sparse', [True, False])
def test_get_dummies_dont_sparsify_all_columns(self, sparse):
# GH18914
df = DataFrame.from_dict(OrderedDict([('GDP', [1, 2]),
('Nation', ['AB', 'CD'])]))
df = get_dummies(df, columns=['Nation'], sparse=sparse)
df2 = df.reindex(columns=['GDP'])
tm.assert_frame_equal(df[['GDP']], df2)
def test_get_dummies_duplicate_columns(self, df):
# GH20839
df.columns = ["A", "A", "A"]
result = get_dummies(df).sort_index(axis=1)
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['A', 'A_a', 'A_b', 'A_b', 'A_c'],
dtype=np.uint8).sort_index(axis=1)
expected = expected.astype({"A": np.int64})
tm.assert_frame_equal(result, expected)
class TestCategoricalReshape(object):
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_reshaping_panel_categorical(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
midx = pd.MultiIndex(levels=[['a'], cidx],
labels=[[0, 0], [0, 1]])
df = DataFrame([[10, 11]], index=midx)
expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
index=midx, columns=cidx)
from pandas.core.reshape.reshape import make_axis_dummies
result = make_axis_dummies(df)
tm.assert_frame_equal(result, expected)
result = make_axis_dummies(df, transform=lambda x: x)
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
williamalu/mimo_usrp
|
scripts/gen_noise.py
|
1
|
1828
|
import numpy as np
import matplotlib.pyplot as plt
def make_pulses(data, T, pulse):
widen = np.zeros(len(data) * T, dtype=np.complex64)
for idx, val in enumerate(widen):
if idx % T == 0:
widen[idx] = data[ idx//T ]
return np.array(np.convolve(widen, pulse, 'full'), dtype=np.complex64)
def raised_cosine(size, T):
W = 1/T
pulse = np.zeros(size, dtype=np.complex64)
alpha = 0.5
for idx, t in enumerate(range(-size//T, size//T)):
val = np.sinc(2*W*t) * ( np.cos( 2*np.pi*alpha*W*t )/( 1 - 16 * (alpha**2) * (W**2) * (t**2)) )
        pulse[idx] = val
    # debug plotting disabled so the function actually returns the pulse:
    # plt.plot(pulse)
    # plt.show()
    return pulse
if __name__ == "__main__":
data_path = '../data/'
# Gen noise
np.random.seed(45)
noise_size = 10000
noise1 = np.array(np.random.choice([0.5, -0.5], size=noise_size))
noise2 = np.array(np.random.choice([0.5, -0.5], size=noise_size))
# Make noise into pulses
T = 10
pulse = np.ones(10)
noise1 = make_pulses(noise1, T, pulse)
noise2 = make_pulses(noise2, T, pulse)
# Save noise for cross correlation later
noise1.tofile(data_path + "noise_1.bin")
noise2.tofile(data_path + "noise_2.bin")
# Make filler so we can send everything at once
zeros_gap = np.zeros(10000)
zeros = np.zeros(len(noise1))
# Data for channel 1
channel1 = np.concatenate( [noise1, zeros_gap, zeros] )
channel2 = np.concatenate( [zeros, zeros_gap, noise2] )
channel1 = np.array( channel1, dtype=np.complex64 )
channel2 = np.array( channel2, dtype=np.complex64 )
# Save out data
channel1.tofile(data_path + "noise_1_transmit.bin")
channel2.tofile(data_path + "noise_2_transmit.bin")
# Plot for verification
plt.plot(channel1)
plt.plot(channel2)
plt.show()
|
mit
|
changyaochen/zi_DAQs
|
vacmega_open_loop_2ch.py
|
1
|
5176
|
# -*- coding: utf-8 -*-
"""
This is for multiple open-loop sweeps with different vac values;
the inputs are from both channels, therefore I am going to record
the results simultaneously from multiple demods
with the function open_loop_sweep_2ch()
@author: changyao chen
"""
from open_loop_sweep_2ch import open_loop_sweep_2ch
import os
import numpy as np
import pandas as pd
import zi_processing
# get the directories right
dname = os.path.dirname(os.path.abspath(__file__))
os.chdir(dname)
device_id= 'dev267'
# ===== global setting =======
_debug_ = True
vac_list = np.linspace(0.01, 0.51, 2)
start_freq = 27.00e3
stop_freq = 40.00e3
samplecount = 13000
avg_sample= 3
if _debug_:
vac_list = [0.1]; samplecount = 100 # for debug purpose
inplace_fit = False # whether to do Lorentz fit for each sweep
# ==============================
# I will save the output into 2 types of files:
# for the first type, each vac has its own file,
# with vac attached to the file name;
# the second type is the usual megasweep format (single file)
# initialization for the type_II_ch1 and _ch2 data
type_II_ch1 = pd.DataFrame()
type_II_ch2 = pd.DataFrame()
# initialize output file
fitted_results_ch1 = []
fitted_results_ch2 = []
# only to extract certain fields from the raw output
headers = ['frequency', 'x', 'y']
# prompt to ask for path to save data
handle = input('saved file path/name: ')
if len(handle) == 0:
handle = 'temp'
# create a folder for the results
if not os.path.exists(handle):
os.makedirs(handle)
os.chdir(dname + '/' + handle)
elif handle == 'temp':
os.chdir(dname + '/' + handle)
else:
raise Exception('Please input a valid file path/name!')
# I need to somehow save the experiment parameters...
parameters = { # default values
'avg_sample': 10,
'avg_tc': 15,
'samplecount': 1000,
'tc': 0.005,
'rate': 2000
}
# update the default values
parameters['samplecount'] = samplecount
parameters['start_freq'] = start_freq
parameters['stop_freq'] = stop_freq
parameters['avg_sample'] = avg_sample
for vac in vac_list:
# run one open loop sweep
# the return result is a list, and its [0][0] element is the dict
# that is of interest
result_1, result_2 = open_loop_sweep_2ch(device_id = 'dev267',
start_freq = start_freq, stop_freq = stop_freq,
amplitude=vac, avg_sample= avg_sample,
samplecount = samplecount)
type_I_temp_ch1 = pd.DataFrame.from_dict({x: result_1[0][0][x] for x in headers})
type_I_temp_ch1['vac'] = vac
type_I_temp_ch2 = pd.DataFrame.from_dict({x: result_2[0][0][x] for x in headers})
type_I_temp_ch2['vac'] = vac
# save the type I data
type_I_temp_ch1.to_csv(handle + '_ch1_' + str(vac) + '.txt', sep = '\t',
index = False)
type_I_temp_ch2.to_csv(handle + '_ch2_' + str(vac) + '.txt', sep = '\t',
index = False)
# do the fit if chosen
if inplace_fit:
# fit the data to a Lorentz curve, ch1
p_fit, p_err = zi_processing.fit_lorentz_sweeper(type_I_temp_ch1, showHTML = False,
figure_name = handle + '_ch1_' + str(vac),
zoom_in_fit = False)
A, f0, Q, bkg = [x for x in p_fit]
A_err, f0_err, Q_err, bkg_err = [x for x in p_err]
fitted_results_ch1.append([vac, f0, f0_err, Q, Q_err])
# fit the data to a Lorentz curve, ch2
p_fit, p_err = zi_processing.fit_lorentz_sweeper(type_I_temp_ch2, showHTML = False,
figure_name = handle + '_ch2_' + str(vac),
zoom_in_fit = False)
A, f0, Q, bkg = [x for x in p_fit]
A_err, f0_err, Q_err, bkg_err = [x for x in p_err]
fitted_results_ch2.append([vac, f0, f0_err, Q, Q_err])
if type_II_ch1.size == 0: # first time
type_II_ch1 = type_I_temp_ch1
type_II_ch2 = type_I_temp_ch2
else:
type_II_ch1 = type_II_ch1.append(type_I_temp_ch1, ignore_index = True)
type_II_ch2 = type_II_ch2.append(type_I_temp_ch2, ignore_index = True)
# save the type_II_ch1 data
type_II_ch1.to_csv(handle + '_ch1_vacmega.txt', sep = '\t',
                   index = False, header = False)
# save the type_II_ch2 data
type_II_ch2.to_csv(handle + '_ch2_vacmega.txt', sep = '\t',
                   index = False, header = False)
# save the fitted result
if inplace_fit:
    fitted_results_ch1 = pd.DataFrame(fitted_results_ch1, columns = ['vac(V)', 'f0(Hz)', 'f0_err(Hz)',
                                      'Q', 'Q_err'])
fitted_results_ch1.to_csv(handle + '_ch1_fitted_results.txt', sep = '\t',
index = False)
# save the parameter!
with open('sweep_parameters.txt','w') as f:
for key in parameters:
f.write('{:20} : {}\n'.format(key, parameters[key]))
# return to the parent folder
os.chdir('..')
|
gpl-3.0
|
Calvinxc1/Data_Analytics
|
old versions/analysis_scripts.py
|
1
|
60386
|
#%% Libraries
import pandas as pd
import numpy as np
from math import log
from sklearn import metrics as skmet
import sys
from tabulate import tabulate
#%% Dataset Splitter Function
def data_split(dataFrame, ratios, seed = None):
## Prepare Data Frame
if type(dataFrame) == pd.core.frame.DataFrame:
df_dataFrame = dataFrame[:]
else:
print "Cannot recognise dataFrame input."
return
## Prepare splitting ratio list
if type(ratios) == list:
if all(isinstance(item, float) for item in ratios):
if sum(ratios) > 1:
print "Ratios are over 100%, cannot split more data than is available."
return
else:
lst_ratios = ratios[:]
if sum(lst_ratios) < 1:
print "Ratios are under 100%, some data will be dropped from original."
else:
print "Cannot recognise ratios input."
return
else:
print "Cannot recognise ratios input."
return
## Prepare random seed value
if seed is None:
int_seedVal = np.random.randint(0, sys.maxint)
else:
try:
int_seedVal = int(seed)
except:
print "Cannot recognise seed input."
return
np.random.seed(int_seedVal)
    ## Randomly permutes the index values
lst_indexArrange = np.random.permutation(df_dataFrame.index)
## Grabs brackets of index values, based on lst_ratios input
lst_dataSets = []
for int_indexRatio in range(len(lst_ratios)):
df_dataset = df_dataFrame.loc[lst_indexArrange[int(round(float(sum(lst_ratios[0:int_indexRatio])) * len(df_dataFrame.index))):int(round(float(sum(lst_ratios[0:int_indexRatio + 1])) * len(df_dataFrame.index)))]]
lst_dataSets.append({
"Dataset": df_dataset,
"Ratio": lst_ratios[int_indexRatio],
"Observations": len(df_dataset.index)
})
## Return list of final dataset splits
return lst_dataSets
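## A minimal usage sketch (df_raw is a hypothetical DataFrame): split 70/30
## with a fixed seed; each element of the returned list is a dict with
## "Dataset", "Ratio" and "Observations" keys.
#
#   splits = data_split(df_raw, [0.7, 0.3], seed = 42)
#   df_train = splits[0]["Dataset"]
#   df_test = splits[1]["Dataset"]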
#%% Data Preparation Function
def data_prep(dataFrame, variables, intercept = False, interactions = None, featOrder = None, catDropFirst = None, contNorm = None, contPoly = None):
## Prepare Variables list
if (type(variables) == list) | (type(variables) == np.ndarray):
arr_vars = np.array(variables)
int_countVars = arr_vars.size
else:
print "Cannot recognise variables input."
return
## Prepare Data Frame
if type(dataFrame) == pd.core.frame.DataFrame:
df_dataFrame = dataFrame[arr_vars]
int_countObs = df_dataFrame.index.size
else:
print "Cannot recognise dataFrame input."
return
## Prepare Interactions List
if (type(interactions) == list) | (type(interactions) == np.ndarray):
if all(isinstance(item, list) for item in interactions):
lst_inter = interactions
elif all(isinstance(item, int) for item in interactions) | all(isinstance(item, str) for item in interactions):
lst_inter = [interactions]
else:
print "Cannot recognise interactions input."
return
elif interactions is None:
lst_inter = []
else:
print "Cannot recognise interactions input."
return
if featOrder is None:
mod_featOrder = None
elif (type(featOrder) == list) | (type(featOrder) == np.ndarray):
mod_featOrder = np.array(featOrder)
else:
print "Cannot recognise featOrder input."
return
## Prepare Categorical: Drop First option set
if catDropFirst is None:
arr_catDropFirst = np.array(["None"] * int_countVars)
elif type(catDropFirst) == str:
arr_catDropFirst = np.array([catDropFirst] * int_countVars)
elif (type(catDropFirst) == list) | (type(catDropFirst) == np.ndarray):
if len(catDropFirst) == int_countVars:
arr_catDropFirst = np.array(catDropFirst)
else:
print "catDropFirst list does not have equal length with variables."
return
else:
print "Cannot recognise catDropFirst input."
return
## Prepare Continuous: Normalize Data option set
if contNorm is None:
arr_contNorm = np.array(["None"] * int_countVars)
elif type(contNorm) == str:
arr_contNorm = np.array([contNorm] * int_countVars)
elif (type(contNorm) == list) | (type(contNorm) == np.ndarray):
if len(contNorm) == int_countVars:
arr_contNorm = contNorm
else:
print "contNorm list does not have equal length with variables."
return
else:
print "Cannot recognise contNorm input."
return
## Prepare Continuous: Polynomialize Data option set
if contPoly is None:
arr_contPoly = np.array([1] * int_countVars)
elif type(contPoly) == int:
arr_contPoly = np.array([contPoly] * int_countVars)
elif (type(contPoly) == list) | (type(contPoly) == np.ndarray):
if len(contPoly) == int_countVars:
arr_contPoly = contPoly
else:
print "contPoly list does not have equal length with variables."
return
else:
print "Cannot recognise contPoly input."
return
## Prepare Intercept option
try:
bol_intercept = bool(intercept)
except:
print "cannot recognise intercept input."
return
## Initialize starting points /w Intercept(s)
if bol_intercept:
arr_data = np.array([[1.0]] * df_dataFrame.shape[0])
arr_feats = np.array(["Intercept"], dtype = str)
arr_types = np.array(["Intercept"], dtype = str)
lst_featureGroups = [[0]]
int_resultIndex = 1
else:
arr_data = np.array(np.empty(shape = (df_dataFrame.shape[0], 0)))
arr_feats = np.empty(shape = 0, dtype = str)
arr_types = np.empty(shape = 0, dtype = str)
lst_featureGroups = []
int_resultIndex = 0
## Iterate over each feature and transform as appropriate
arr_dTypes = df_dataFrame[arr_vars].dtypes.values
for int_varIndex in range(int_countVars):
mod_currDType = arr_dTypes[int_varIndex]
if np.issubdtype(mod_currDType, int) | np.issubdtype(mod_currDType, float):
arr_bufferCol = np.array(np.empty((int_countObs, 0), dtype = float))
arr_bufferFeats = np.empty(0)
arr_bufferTypes = np.empty(0)
lst_bufferFeatGrp = []
for exp in range(1, arr_contPoly[int_varIndex] + 1):
arr_bufferTemp = np.array(df_dataFrame[[arr_vars[int_varIndex]]].values ** exp)
if arr_contNorm[int_varIndex] == "Z-Score":
arr_bufferTemp = (arr_bufferTemp - np.mean(arr_bufferTemp, axis = 0)) / np.std(arr_bufferTemp, axis = 0)
elif arr_contNorm[int_varIndex] == "Mean Center":
arr_bufferTemp = arr_bufferTemp - np.mean(arr_bufferTemp, axis = 0)
elif arr_contNorm[int_varIndex] == "Variance Center":
arr_bufferTemp = arr_bufferTemp / np.std(arr_bufferTemp, axis = 0)
arr_bufferCol = np.append(arr_bufferCol, arr_bufferTemp, axis = 1)
if exp == 1:
arr_bufferFeats = np.append(arr_bufferFeats, arr_vars[int_varIndex])
else:
arr_bufferFeats = np.append(arr_bufferFeats, arr_vars[int_varIndex] + "_" + str(exp))
arr_bufferTypes = np.append(arr_bufferTypes, "Continuous")
lst_bufferFeatGrp.append(int_resultIndex)
int_resultIndex += 1
elif mod_currDType == bool:
arr_bufferCol = np.array(df_dataFrame[[arr_vars[int_varIndex]]].values.astype(int).astype(float))
arr_bufferFeats = arr_vars[int_varIndex]
arr_bufferTypes = np.array(["Categorical"])
lst_bufferFeatGrp = [int_resultIndex]
int_resultIndex += 1
elif mod_currDType == object:
if arr_catDropFirst[int_varIndex] == "All":
bol_dropFirst = True
elif (arr_catDropFirst[int_varIndex] == "Dummy") & (pd.unique(df_dataFrame[arr_vars[int_varIndex]]).size == 2):
bol_dropFirst = True
else:
bol_dropFirst = False
arr_bufferCol = pd.get_dummies(df_dataFrame[[arr_vars[int_varIndex]]], drop_first = bol_dropFirst)
arr_bufferFeats = arr_bufferCol.columns.values.astype(str)
int_bufferCatLen = arr_bufferFeats.size
arr_bufferTypes = np.array(["Categorical"] * int_bufferCatLen)
lst_bufferFeatGrp = range(int_resultIndex, int_resultIndex + int_bufferCatLen)
int_resultIndex += int_bufferCatLen
else:
print "Unknown data type on variable", arr_vars[int_varIndex]
continue
arr_data = np.append(arr_data, arr_bufferCol, axis = 1)
arr_feats = np.append(arr_feats, arr_bufferFeats)
arr_types = np.append(arr_types, arr_bufferTypes)
lst_featureGroups.append(lst_bufferFeatGrp)
## Iterate over all combinations of interactions
for lst_currInter in lst_inter:
lst_interIndex = []
for mod_interItem in lst_currInter:
if all(isinstance(item, int) for item in lst_currInter):
int_featIndex = mod_interItem
elif all(isinstance(item, str) for item in lst_currInter):
int_featIndex = int(np.where(mod_interItem == arr_vars)[0])
if bol_intercept:
int_featIndex += 1
lst_interIndex.append(lst_featureGroups[int_featIndex])
int_currInterLen = len(lst_interIndex)
arr_bufferCol = np.array(np.empty((int_countObs, 0), dtype = float))
arr_bufferFeats = np.empty(0)
arr_bufferTypes = np.empty(0)
for int_currInterStartIndex in range(0, int_currInterLen - 1):
for int_currInterStart in lst_interIndex[int_currInterStartIndex]:
for int_currInterEndIndex in range(int_currInterStartIndex + 1, int_currInterLen):
for int_currInterEnd in lst_interIndex[int_currInterEndIndex]:
arr_bufferTemp = (arr_data[:, int_currInterStart] * arr_data[:, int_currInterEnd]).reshape(int_countObs, 1)
arr_bufferCol = np.append(arr_bufferCol, arr_bufferTemp, axis = 1)
arr_bufferFeats = np.append(arr_bufferFeats, arr_feats[int_currInterStart] + "*" + arr_feats[int_currInterEnd])
arr_bufferTypes = np.append(arr_bufferTypes, arr_types[int_currInterStart] + "*" + arr_types[int_currInterEnd])
arr_data = np.append(arr_data, arr_bufferCol, axis = 1)
arr_feats = np.append(arr_feats, arr_bufferFeats)
arr_types = np.append(arr_types, arr_bufferTypes)
arr_forcedCats = np.empty(0)
if mod_featOrder is not None:
arr_actualIndex = np.empty(0, dtype = int)
for currFeatIndex in range(mod_featOrder.size):
if currFeatIndex >= arr_feats.size:
arr_data = np.append(arr_data, np.array([0] * int_countObs).reshape((int_countObs, 1)), axis = 1)
arr_feats = np.append(arr_feats, mod_featOrder[currFeatIndex])
arr_forcedCats = np.append(arr_forcedCats, mod_featOrder[currFeatIndex])
arr_actualIndex = np.append(arr_actualIndex, arr_feats.size - 1)
else:
if mod_featOrder[currFeatIndex] != arr_feats[currFeatIndex]:
if mod_featOrder[currFeatIndex] in arr_feats:
arr_actualIndex = np.append(arr_actualIndex, np.where(arr_feats == mod_featOrder[currFeatIndex])[0][0])
else:
arr_data = np.append(arr_data, np.array([0] * int_countObs).reshape((int_countObs, 1)), axis = 1)
arr_feats = np.append(arr_feats, mod_featOrder[currFeatIndex])
arr_forcedCats = np.append(arr_forcedCats, mod_featOrder[currFeatIndex])
arr_actualIndex = np.append(arr_actualIndex, arr_feats.size - 1)
else:
arr_actualIndex = np.append(arr_actualIndex, currFeatIndex)
arr_data = arr_data[:, arr_actualIndex]
arr_feats = arr_feats[arr_actualIndex]
## Create return dictionary object
dic_returnVar = {
"Array": {
"Data": arr_data,
"Labels": arr_vars,
"Features": arr_feats,
"Types": arr_types,
"Groups": lst_featureGroups,
},
"Options": {
"Categorical": {
"Drop First": arr_catDropFirst
},
"Continuous": {
"Normalization": arr_contNorm,
"Polynomial": arr_contPoly
},
"Intercept": bol_intercept,
"Interactions": lst_inter,
"Forced Ordering": {
"Sequence": mod_featOrder,
"Created": arr_forcedCats
}
}
}
## Return dictionary object
return dic_returnVar
#%% Regression - Multipurpose Step Module
def reg_step_mod(str_regType, arr_xSample, arr_ySample, arr_betas, int_chunkSize, int_batchSize, flt_l1Weight, flt_l2Weight, arr_intercept):
## Initialize basic data shape variables
int_obsCount = arr_xSample.shape[0]
int_featCount = arr_xSample.shape[1]
int_catCount = arr_ySample.shape[2]
## Linear Weights Calculation
arr_betaDotX = np.sum(arr_xSample * arr_betas, axis = 1).reshape((int_obsCount, 1, int_catCount))
## Split calculations by model type
if str_regType == "Linear": ## For a Linear regression model
arr_predict = arr_betaDotX
arr_err = arr_ySample - arr_predict
arr_deriv = -2 * np.mean(arr_xSample * arr_err, axis = 0)
arr_l1Val = (2 * np.mean(arr_xSample * (arr_ySample - (arr_predict - (arr_xSample * arr_betas))), axis = 0)).reshape((1, int_featCount, int_catCount))
arr_l2Mod = 2 * arr_betas * flt_l2Weight
arr_deriv2 = 2 * np.mean(arr_xSample ** 2, axis = 0).reshape((1, int_featCount, 1))
arr_l2Mod2 = np.array([[2 * flt_l2Weight] * int_catCount] * int_featCount).reshape((1, int_featCount, int_catCount))
elif str_regType == "Logistic": ## For a Logistic regression model
arr_betaDotX = np.sum(arr_xSample * arr_betas, axis = 1).reshape((int_obsCount, 1, int_catCount))
arr_predict = 1 / (1 + np.exp(-arr_betaDotX))
arr_err = arr_ySample - arr_predict
arr_deriv = np.sum(-arr_xSample * arr_err, axis = 0).reshape((1, int_featCount, int_catCount))
arr_l1Val = np.sum(arr_xSample * (arr_ySample - (1 / (1 + np.exp(-(arr_betaDotX - arr_xSample))))), axis = 0).reshape((1, int_featCount, int_catCount))
arr_l2Mod = 2 * arr_betas * flt_l2Weight
arr_deriv2 = np.sum((arr_xSample ** 2) * (arr_predict * (1 - arr_predict)), axis = 0).reshape((1, int_featCount, int_catCount))
arr_l2Mod2 = np.array([[2 * flt_l2Weight] * int_catCount] * int_featCount).reshape((1, int_featCount, int_catCount))
elif str_regType == "Ordered Logistic": ## For an Ordered Logistic regression model
arr_betaDotX = np.sum(arr_xSample * arr_betas, axis = 1).reshape((int_obsCount, 1, int_catCount))
arr_predict = 1 / (1 + np.exp(-arr_betaDotX))
arr_err = arr_ySample - arr_predict
arr_deriv = np.sum(-arr_xSample * arr_err, axis = 0).reshape((1, int_featCount, int_catCount))
arr_l1Val = np.sum(arr_xSample * (arr_ySample - (1 / (1 + np.exp(-(arr_betaDotX - arr_xSample))))), axis = 0).reshape((1, int_featCount, int_catCount))
arr_l2Mod = 2 * arr_betas * flt_l2Weight
arr_deriv2 = np.sum((arr_xSample ** 2) * (arr_predict * (1 - arr_predict)), axis = 0).reshape((1, int_featCount, int_catCount))
arr_l2Mod2 = np.array([[2 * flt_l2Weight] * int_catCount] * int_featCount).reshape((1, int_featCount, int_catCount))
## Universal calculations
arr_l1Keep = np.absolute(arr_l1Val) >= flt_l1Weight
arr_l1Keep[:, arr_intercept] = True
arr_l2Mod[:, arr_intercept] = 0.0
arr_deriv = arr_deriv + arr_l2Mod
arr_l2Mod2[:, arr_intercept] = 0
arr_deriv2 = arr_deriv2 + arr_l2Mod2
arr_stepSize = arr_deriv / arr_deriv2
arr_stepSize = np.nan_to_num(arr_stepSize) / int_featCount
arr_betas = (arr_betas - (arr_stepSize / int_chunkSize)) * arr_l1Keep
if str_regType == "Ordered Logistic":
arr_intBuff = arr_betas[:, arr_intercept]
arr_betas = np.mean(arr_betas, axis = 2).reshape(1, int_featCount, 1)
arr_betas[:, arr_intercept] = 1
arr_intBuff = np.append(arr_intBuff, np.ones((1, int_featCount - arr_intercept.size, int_catCount)), axis = 1)
arr_betas = arr_betas * arr_intBuff
## Return resulting betas and l1 values
return arr_betas, arr_l1Val
#%% Regression - Multipurpose Error Module
def reg_err_mod(str_regType, arr_xSample, arr_ySample, arr_betas):
## Initialize basic data shape variables
int_obsCount = arr_xSample.shape[0]
#int_featCount = arr_xSample.shape[1]
int_catCount = arr_ySample.shape[2]
## Split calculations by model type
if str_regType == "Linear":
arr_predict = np.sum(arr_xSample * arr_betas, axis = 1).reshape((int_obsCount, 1, int_catCount))
arr_err = arr_ySample - arr_predict
arr_errVal = np.sqrt(np.mean(arr_err ** 2, axis = 0)).reshape(int_catCount)
arr_yErrVal = np.sqrt(np.mean((arr_ySample - np.mean(arr_ySample, axis = 0).reshape(1, 1, int_catCount)) ** 2, axis = 0)).reshape(int_catCount)
elif str_regType == "Logistic":
arr_betaDotX = np.sum(arr_xSample * arr_betas, axis = 1).reshape((int_obsCount, 1, int_catCount))
arr_errVal = -np.mean(-arr_betaDotX * (1 - arr_ySample) - np.log(1 + np.exp(-arr_betaDotX)), axis = 0)
arr_yErrVal = np.log(np.mean(arr_ySample, axis = 0) / (1 - np.mean(arr_ySample, axis = 0)))
arr_yErrVal = -np.mean(-arr_yErrVal * (1 - arr_ySample) - np.log(1 + np.exp(-arr_yErrVal)), axis = 0)
elif str_regType == "Ordered Logistic":
arr_betaDotX = np.sum(arr_xSample * arr_betas, axis = 1).reshape((int_obsCount, 1, int_catCount))
arr_errVal = -np.mean(-arr_betaDotX * (1 - arr_ySample) - np.log(1 + np.exp(-arr_betaDotX)), axis = 0)
arr_yErrVal = np.log(np.mean(arr_ySample, axis = 0) / (1 - np.mean(arr_ySample, axis = 0)))
arr_yErrVal = -np.mean(-arr_yErrVal * (1 - arr_ySample) - np.log(1 + np.exp(-arr_yErrVal)), axis = 0)
## Return model error and mean error values
return arr_errVal, arr_yErrVal
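#%% Regression - Step/Error Module Example (added illustrative sketch)
# Minimal sketch of the array convention shared by reg_step_mod and reg_err_mod:
# x is (observations, features, 1), y is (observations, 1, categories) and the betas
# are (1, features, categories). All values are hypothetical and numpy is assumed to be
# imported as np at the top of this script. The function is defined but never called,
# so it does not change the behaviour of the pipeline; run it manually if curious.
def _demo_reg_modules():
    rng = np.random.RandomState(0)
    arr_x = np.hstack([np.ones((5, 1)), rng.rand(5, 1)]).reshape(5, 2, 1)  # intercept + 1 feature
    arr_y = rng.rand(5).reshape(5, 1, 1)                                   # single continuous target
    arr_b = np.zeros((1, 2, 1))
    arr_b, arr_l1 = reg_step_mod("Linear", arr_x, arr_y, arr_b, 1, 5, 0.0, 0.0, np.array([0]))
    arr_modelErr, arr_meanErr = reg_err_mod("Linear", arr_x, arr_y, arr_b)
    return arr_b, arr_modelErr, arr_meanErr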
#%% Regression - Gradient Subsystem
def reg_grad(regType, xMatrix, yMatrix, intercept = True, l1Weight = 0, l2Weight = 0, chunkCycles = 1000, chunkSize = 0, batchSize = 0, errChangeBreak = False, corrStart = False, randBeta = False, seedVal = None):
try:
str_regType = str(regType)
except:
print "Cannot recognise regType input"
return
if type(xMatrix) == np.ndarray:
arr_xMatrix = xMatrix
else:
print "Cannot recognise xMatrix input"
return
if type(yMatrix) == np.ndarray:
arr_yMatrix = yMatrix
else:
print "Cannot recognise yMatrix input"
return
if intercept == True:
arr_intercept = np.array([0])
elif intercept == False:
arr_intercept = np.empty(0, dtype = int)
else:
try:
arr_intercept = np.array(intercept)
except:
print "Cannot recognise intercept input"
return
try:
bol_corrStart = bool(corrStart)
except:
print "Cannot recognise corrStart input"
return
try:
bol_randBeta = bool(randBeta)
except:
print "Cannot recognise randBeta input"
return
try:
flt_l1Weight = float(l1Weight)
if flt_l1Weight < 0:
print "L1 Weight cannot be negative"
return
except:
print "Cannot recognise l1Weight input"
return
try:
flt_l2Weight = float(l2Weight)
if flt_l2Weight < 0:
print "L2 Weight cannot be negative"
return
except:
print "Cannot recognise l2Weight input"
return
int_obsCount = int(arr_xMatrix.shape[0])
int_featCount = int(arr_xMatrix.shape[1])
int_catCount = int(arr_yMatrix.shape[1])
str_regType = str(str_regType)
if errChangeBreak == False:
flt_errChangeBreak = False
else:
flt_errChangeBreak = float(errChangeBreak)
if seedVal is None:
int_seedVal = np.random.randint(0, sys.maxint)
else:
int_seedVal = int(seedVal)
if batchSize >= 1:
int_batchSize = int(batchSize)
elif batchSize == 0:
int_batchSize = int_obsCount
else:
int_batchSize = int(round(batchSize * int_obsCount))
if chunkSize >= 1:
int_chunkSize = int(chunkSize)
elif chunkSize == 0:
int_chunkSize = int(int_obsCount / int_batchSize)
else:
int_chunkSize = int((int_obsCount * chunkSize) / int_batchSize)
int_obsPull = int_chunkSize * int_batchSize
flt_chunkRatio = int_obsPull / float(int_obsCount)
int_dataRuns = int(np.floor(flt_chunkRatio))
int_chunkFirst = int(np.round((flt_chunkRatio - int_dataRuns) * int_obsCount))
int_chunkCycles = int(chunkCycles)
np.random.seed(int_seedVal)
arr_xMatrix = arr_xMatrix.reshape(int_obsCount, int_featCount, 1)
arr_yMatrix = arr_yMatrix.reshape(int_obsCount, 1, int_catCount)
if bol_corrStart:
arr_xSum = np.sum(arr_xMatrix, axis = 0)
arr_ySum = np.sum(arr_yMatrix, axis = 0)
arr_xSumSq = np.sum(arr_xMatrix ** 2, axis = 0)
arr_ySumSq = np.sum(arr_yMatrix ** 2, axis = 0)
arr_xySum = np.sum(arr_xMatrix * arr_yMatrix, axis = 0)
arr_betas = (((int_obsCount * arr_xySum) - (arr_xSum * arr_ySum)) / (np.sqrt((int_obsCount * arr_xSumSq) - (arr_xSum ** 2)) * np.sqrt((int_obsCount * arr_ySumSq) - (arr_ySum ** 2)))).reshape((1, int_featCount, int_catCount))
arr_betas[((arr_betas == -np.inf) | (arr_betas == np.inf) | (np.isnan(arr_betas)))] = 0
else:
arr_betas = np.array([[0.0] * int_catCount] * int_featCount).reshape((1, int_featCount, int_catCount))
if bol_randBeta:
arr_betas = np.random.normal(arr_betas, 1 / np.sqrt(int_obsCount), (1, int_featCount, int_catCount))
arr_errVal = np.array([np.inf] * int_catCount)
int_iterCounter = 0
arr_pathErrVal = np.empty((0, int_catCount))
arr_pathBetas = np.empty((0, int_featCount, int_catCount))
for chunkCycle in range(int_chunkCycles):
arr_chunkObs = np.random.choice(int_obsCount, int_chunkFirst, replace = False)
for dataChunk in range(int_dataRuns):
arr_chunkObs = np.append(arr_chunkObs, np.random.choice(int_obsCount, int_obsCount, replace = False))
arr_priorErrVal = arr_errVal
arr_runningErrVal = np.empty((0, int_catCount))
arr_runningYErrVal = np.empty((0, int_catCount))
arr_runningBetas = np.empty((0, int_featCount, int_catCount))
arr_runningL1Val = np.empty((0, int_featCount, int_catCount))
for batchCount in range(int_chunkSize):
int_startIndex = batchCount * int_batchSize
arr_obsIndex = np.take(arr_chunkObs, np.arange(int_startIndex, int_startIndex + int_batchSize))
arr_xSample = np.take(arr_xMatrix, arr_obsIndex, axis = 0)
arr_ySample = np.take(arr_yMatrix, arr_obsIndex, axis = 0)
arr_betas, arr_l1Val = reg_step_mod(str_regType, arr_xSample, arr_ySample, arr_betas, int_chunkSize, int_batchSize, flt_l1Weight, flt_l2Weight, arr_intercept)
arr_errVal, arr_yErrVal = reg_err_mod(str_regType, arr_xSample, arr_ySample, arr_betas)
arr_runningErrVal = np.append(arr_runningErrVal, arr_errVal.reshape((1, int_catCount)), axis = 0)
arr_runningYErrVal = np.append(arr_runningYErrVal, arr_yErrVal.reshape((1, int_catCount)), axis = 0)
arr_runningBetas = np.append(arr_runningBetas, arr_betas, axis = 0)
arr_runningL1Val = np.append(arr_runningL1Val, arr_l1Val, axis = 0)
arr_pathErrVal = np.append(arr_pathErrVal, arr_errVal.reshape((1, int_catCount)), axis = 0)
int_iterCounter += 1
arr_l1Val = np.round(np.mean(arr_runningL1Val, axis = 0)).reshape((1, int_featCount, int_catCount))
arr_l1Keep = np.absolute(arr_l1Val) > flt_l1Weight
arr_l1Keep[:, arr_intercept] = True
arr_pathBetas = np.append(arr_pathBetas, np.mean(arr_runningBetas, axis = 0).reshape((1, int_featCount, int_catCount)) * arr_l1Keep, axis = 0)
if flt_errChangeBreak != False:
arr_errVal = np.mean(arr_runningErrVal, axis = 0)
arr_errChange = arr_errVal - arr_priorErrVal
if np.sum(arr_errChange < -flt_errChangeBreak) > 0:
break
arr_l1Val = np.round(np.mean(arr_runningL1Val, axis = 0)).reshape((int_featCount, int_catCount))
arr_l1Keep = np.absolute(arr_l1Val) > flt_l1Weight
arr_l1Keep[arr_intercept] = True
arr_betas = np.mean(arr_runningBetas, axis = 0) * arr_l1Keep
arr_errVal = np.mean(arr_runningErrVal, axis = 0)
arr_yErrVal = np.mean(arr_runningYErrVal, axis = 0)
dic_returnModel = {
"Coefficients": arr_betas,
"Diagnostic": {
"Model Error": arr_errVal,
"Mean Error": arr_yErrVal,
"Error Path": arr_pathErrVal,
"Coef Path": arr_pathBetas,
"Used Coef": arr_l1Keep
},
"Model": {
"Type": str_regType,
"Solution": {
"Type": "Gradient",
"Batch Size": int_batchSize,
"Batches per Chunk": int_chunkSize,
"Chunk Cycles": int_chunkCycles,
"Error Improvement Threshold": flt_errChangeBreak,
"Iterations": {
"Actual": int_iterCounter,
"Maximum": int_chunkSize * int_chunkCycles
}
},
"L1": flt_l1Weight,
"L2": flt_l2Weight,
"Random Seed": int_seedVal,
"Coefficient Seed": {
"Correlation": bol_corrStart,
"Random": bol_randBeta
},
"Intercept": arr_intercept
}
}
return dic_returnModel
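#%% Regression - Gradient Subsystem Example (added illustrative sketch)
# Hedged example only: fits y = 2 + 3*x on synthetic data so the call signature is clear.
# Column 0 of the design matrix is the intercept, matching the default intercept = True,
# and a fixed seedVal keeps the run reproducible. All numbers are hypothetical; the
# function is defined but never invoked here.
def _demo_reg_grad():
    rng = np.random.RandomState(1)
    arr_xRaw = rng.rand(50, 1)
    arr_x = np.hstack([np.ones((50, 1)), arr_xRaw])
    arr_y = 2.0 + 3.0 * arr_xRaw
    dic_fit = reg_grad("Linear", arr_x, arr_y, chunkCycles = 100, seedVal = 1)
    return dic_fit["Coefficients"]  # should drift toward [[2.0], [3.0]]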
#%% Decision Tree Category Predictor Module
def tree_predict(arr_yMatrix):
arr_predictions = np.mean(arr_yMatrix, axis = 0)
dic_predict = {
"Probabilities": arr_predictions,
"Category": np.argmax(arr_predictions),
"Observations": arr_yMatrix.shape[0],
"Error": 1 - np.max(arr_predictions)
}
return dic_predict
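#%% Decision Tree Category Predictor Example (added illustrative sketch)
# tree_predict expects a one-hot y matrix (observations x categories); this hypothetical
# block shows the resulting dictionary for three observations across two classes.
# Defined but never invoked here.
def _demo_tree_predict():
    arr_y = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    return tree_predict(arr_y)  # Probabilities ~ [0.667, 0.333], Category 0, Error ~ 0.333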
#%% Decision Tree Leaf Creator Module
def tree_leaf(dic_nodePredict, int_currDepth, arr_xMatrix, arr_yMatrix):
dic_returnLeaf = {
"Leaf": True,
"Prediction": dic_nodePredict,
"Depth": int_currDepth,
"Arrays": {
"X Array": arr_xMatrix,
"Y Array": arr_yMatrix
}
}
return dic_returnLeaf
#%% Decision Tree Best Split Module
def tree_splitter(arr_xMatrix, arr_yMatrix, lst_xTypes, lst_xTitles, int_minSplitted):
lst_splitCandidates = []
for featureIndex in range(len(lst_xTitles)):
if lst_xTypes[featureIndex] == "Continuous":
lst_meanStDevVals = []
for yIndex in range(arr_yMatrix.shape[1]):
arr_yVals = arr_xMatrix[arr_yMatrix[:, yIndex] == 1, featureIndex]
if len(arr_yVals) <= 1:
continue
else:
flt_yMean = np.mean(arr_yVals)
flt_yStDev = np.std(arr_yVals)
lst_meanStDevVals.append((flt_yMean, flt_yStDev))
lst_meanStDevVals = list(set(lst_meanStDevVals))
if len(lst_meanStDevVals) == 1:
continue
lst_meanStDevVals.sort(key = lambda tup: tup[0])
flt_bestError = 1.0
dic_featureSplit = None
for meanIndex in range(len(lst_meanStDevVals) - 1):
tup_lowVal = lst_meanStDevVals[meanIndex]
tup_highVal = lst_meanStDevVals[meanIndex + 1]
flt_zMulti = (tup_highVal[0] - tup_lowVal[0]) / (tup_highVal[1] + tup_lowVal[1])
flt_checkVal = tup_lowVal[0] + (tup_lowVal[1] * flt_zMulti)
arr_splitHigh = arr_yMatrix[arr_xMatrix[:, featureIndex] >= flt_checkVal]
if arr_splitHigh.shape[0] < int_minSplitted:
continue
dic_probHigh = tree_predict(arr_splitHigh)
flt_errHigh = 1 - dic_probHigh["Probabilities"][dic_probHigh["Category"]]
dic_highNode = {
"Feature": {
"Title": lst_xTitles[featureIndex],
"Type": lst_xTypes[featureIndex],
"Break": flt_checkVal,
"Direction": "High"
},
"Array": {
"Y Array": arr_splitHigh,
"X Array": arr_xMatrix[arr_xMatrix[:, featureIndex] >= flt_checkVal]
},
"Prediction": dic_probHigh,
"Node Error": flt_errHigh,
"Observations": arr_splitHigh.shape[0]
}
arr_splitLow = arr_yMatrix[arr_xMatrix[:, featureIndex] < flt_checkVal]
if arr_splitLow.shape[0] < int_minSplitted:
continue
dic_probLow = tree_predict(arr_splitLow)
flt_errLow = 1 - dic_probLow["Probabilities"][dic_probLow["Category"]]
dic_lowNode = {
"Feature": {
"Title": lst_xTitles[featureIndex],
"Type": lst_xTypes[featureIndex],
"Break": flt_checkVal,
"Direction": "Low"
},
"Array": {
"Y Array": arr_splitLow,
"X Array": arr_xMatrix[arr_xMatrix[:, featureIndex] < flt_checkVal]
},
"Prediction": dic_probLow,
"Node Error": flt_errLow,
"Observations": arr_splitLow.shape[0]
}
dic_splitOption = {
"Splits": [dic_highNode, dic_lowNode],
"Error": {
"Overall": ((flt_errHigh * dic_probHigh["Observations"]) + (flt_errLow * dic_probLow["Observations"])) / arr_yMatrix.shape[0],
"Splits": [flt_errHigh, flt_errLow]
},
"Feature": {
"Title": lst_xTitles[featureIndex],
"Type": lst_xTypes[featureIndex],
"Break": flt_checkVal,
"Splits": ["gte", "lt"]
}
}
if dic_splitOption["Error"]["Overall"] < flt_bestError:
dic_featureSplit = dic_splitOption
flt_bestError = dic_splitOption["Error"]["Overall"]
if dic_featureSplit != None:
lst_splitCandidates.append(dic_featureSplit)
else:
arr_splitHigh = arr_yMatrix[arr_xMatrix[:, featureIndex] == 1]
if arr_splitHigh.shape[0] < int_minSplitted:
continue
dic_probHigh = tree_predict(arr_splitHigh)
flt_errHigh = 1 - dic_probHigh["Probabilities"][dic_probHigh["Category"]]
dic_highNode = {
"Feature": {
"Title": lst_xTitles[featureIndex],
"Type": lst_xTypes[featureIndex],
"Direction": 1
},
"Array": {
"Y Array": arr_splitHigh,
"X Array": arr_xMatrix[arr_xMatrix[:, featureIndex] == 1]
},
"Prediction": dic_probHigh,
"Node Error": flt_errHigh,
"Observations": arr_splitHigh.shape[0]
}
arr_splitLow = arr_yMatrix[arr_xMatrix[:, featureIndex] == 0]
if arr_splitLow.shape[0] < int_minSplitted:
continue
dic_probLow = tree_predict(arr_splitLow)
flt_errLow = 1 - dic_probLow["Probabilities"][dic_probLow["Category"]]
dic_lowNode = {
"Feature": {
"Title": lst_xTitles[featureIndex],
"Type": lst_xTypes[featureIndex],
"Direction": 0
},
"Array": {
"Y Array": arr_splitLow,
"X Array": arr_xMatrix[arr_xMatrix[:, featureIndex] == 0]
},
"Prediction": dic_probLow,
"Node Error": flt_errLow,
"Observations": arr_splitLow.shape[0]
}
dic_featureSplit = {
"Splits": [dic_highNode, dic_lowNode],
"Error": {
"Overall": ((flt_errHigh * dic_probHigh["Observations"]) + (flt_errLow * dic_probLow["Observations"])) / arr_yMatrix.shape[0],
"Splits": [flt_errHigh, flt_errLow]
},
"Feature": {
"Title": lst_xTitles[featureIndex],
"Type": lst_xTypes[featureIndex],
"Splits": [1, 0]
}
}
lst_splitCandidates.append(dic_featureSplit)
flt_bestError = 1.0
if len(lst_splitCandidates) == 0:
return "Leaf"
else:
for splitItem in lst_splitCandidates:
if splitItem["Error"]["Overall"] < flt_bestError:
dic_bestSplit = splitItem
flt_bestError = splitItem["Error"]["Overall"]
return dic_bestSplit
#%% Decision Tree Constructor Module
def tree_constructor(arr_xMatrix, lst_xFeatures, lst_xTypes, arr_yMatrix, lst_yFeatures, dic_nodePredict, int_maxDepth, int_minSplit, int_minSplitted, flt_minErrDiff, int_currDepth = 0):
if (int_maxDepth != 0) & (int_currDepth >= int_maxDepth):
return tree_leaf(dic_nodePredict, int_currDepth, arr_xMatrix, arr_yMatrix)
elif len(lst_xFeatures) == 0:
return tree_leaf(dic_nodePredict, int_currDepth, arr_xMatrix, arr_yMatrix)
elif arr_xMatrix.shape[0] < int_minSplit:
return tree_leaf(dic_nodePredict, int_currDepth, arr_xMatrix, arr_yMatrix)
elif dic_nodePredict["Error"] == 0:
return tree_leaf(dic_nodePredict, int_currDepth, arr_xMatrix, arr_yMatrix)
else:
dic_split = tree_splitter(arr_xMatrix, arr_yMatrix, lst_xTypes, lst_xFeatures, int_minSplitted)
if dic_split == "Leaf":
return tree_leaf(dic_nodePredict, int_currDepth, arr_xMatrix, arr_yMatrix)
else:
flt_errImp = dic_nodePredict["Error"] - dic_split["Error"]["Overall"]
if (flt_errImp <= flt_minErrDiff) & (flt_minErrDiff >= 0):
return tree_leaf(dic_nodePredict, int_currDepth, arr_xMatrix, arr_yMatrix)
else:
lst_splits = []
for splitIndex in range(len(dic_split["Splits"])):
lst_splits.append(tree_constructor(dic_split["Splits"][splitIndex]["Array"]["X Array"], lst_xFeatures, lst_xTypes, dic_split["Splits"][splitIndex]["Array"]["Y Array"], lst_yFeatures, dic_split["Splits"][splitIndex]["Prediction"], int_maxDepth, int_minSplit, int_minSplitted, flt_minErrDiff, int_currDepth + 1))
dic_currentNode = {
"Split Feature": dic_split["Feature"],
"Splits": lst_splits,
"Leaf": False,
"Prediction": dic_nodePredict,
"Depth": int_currDepth,
"Arrays": {
"X Array": arr_xMatrix,
"Y Array": arr_yMatrix
}
}
return dic_currentNode
#%% Decision Tree Function
def decision_tree(arr_xMatrix, lst_xFeatures, lst_xTypes, arr_yMatrix, lst_yFeatures, maxDepth = 0, minSplit = 2, minChildSplit = 1, minErrDiff = 0):
int_maxDepth = int(maxDepth)
int_minSplit = int(minSplit)
int_minChildSplit = int(minChildSplit)
flt_minErrDiff = float(minErrDiff)
dic_rootPredict = tree_predict(arr_yMatrix)
dic_tree = tree_constructor(arr_xMatrix, lst_xFeatures, lst_xTypes, arr_yMatrix, lst_yFeatures, dic_rootPredict, int_maxDepth, int_minSplit, int_minChildSplit, flt_minErrDiff)
dic_returnTree = {
"Tree": dic_tree,
"Model": {
"Max Depth": int_maxDepth,
"Minimum to Split": {
"Current Node": int_minSplit,
"Child Nodes": int_minChildSplit
},
"Minimum Improved Error": flt_minErrDiff,
"Type": "Decision Tree",
"Solution": "Classification"
}
}
return dic_returnTree
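#%% Decision Tree Example (added illustrative sketch)
# Hypothetical toy data: one continuous feature that cleanly separates two one-hot
# classes, so the splitter should find a single break around the class midpoint.
# Defined but never invoked here; call it manually to inspect the nested node
# dictionaries returned by decision_tree.
def _demo_decision_tree():
    arr_x = np.array([[0.1], [0.2], [0.3], [2.1], [2.2], [2.3]])
    arr_y = np.array([[1.0, 0.0], [1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    return decision_tree(arr_x, ["x1"], ["Continuous"], arr_y, ["y_low", "y_high"])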
#%% Multi-Purpose Model Builder Function
def model_build(str_modelType, df_dataFrame, lst_indepVars, lst_depVars, intercept = None, interactions = None, indep_catDropFirst = None, indep_contNorm = None, indep_contPoly = None, dep_catDropFirst = None, dep_contNorm = None, dep_contPoly = None, **kwargs):
if type(lst_depVars) == str:
lst_depVars = [lst_depVars]
if (str_modelType == "Linear") | (str_modelType == "Logistic") | (str_modelType == "Ordered Logistic"):
if indep_catDropFirst is None:
indep_catDropFirst = "All"
if indep_contNorm is None:
indep_contNorm = "None"
if indep_contPoly is None:
indep_contPoly = 1
if dep_catDropFirst is None:
dep_catDropFirst = "None"
if dep_contNorm is None:
dep_contNorm = "None"
if dep_contPoly is None:
dep_contPoly = 1
if intercept is None:
intercept = True
if interactions is None:
interactions = []
dic_indepData = data_prep(df_dataFrame, lst_indepVars, intercept = intercept, interactions = interactions, catDropFirst = indep_catDropFirst, contNorm = indep_contNorm, contPoly = indep_contPoly)
dic_depData = data_prep(df_dataFrame, lst_depVars, intercept = False, interactions = None, catDropFirst = dep_catDropFirst, contNorm = dep_contNorm, contPoly = dep_contPoly)
dic_returnModel = reg_grad(str_modelType, dic_indepData["Array"]["Data"], dic_depData["Array"]["Data"], intercept = intercept, **kwargs)
dic_returnModel["Coefficients"] = pd.DataFrame(dic_returnModel["Coefficients"], columns = dic_depData["Array"]["Features"], index = dic_indepData["Array"]["Features"])
dic_returnModel["Diagnostic"]["Error"] = pd.DataFrame(np.append(np.append(dic_returnModel["Diagnostic"]["Model Error"], dic_returnModel["Diagnostic"]["Mean Error"]), 1 - (dic_returnModel["Diagnostic"]["Model Error"] / dic_returnModel["Diagnostic"]["Mean Error"])).reshape((3, len(dic_depData["Array"]["Features"]))), columns = dic_depData["Array"]["Features"], index = ["Model Error", "Mean Error", "R Square"])
dic_returnModel["Diagnostic"].pop("Model Error")
dic_returnModel["Diagnostic"].pop("Mean Error")
dic_returnModel["Diagnostic"]["Error Path"] = pd.DataFrame(dic_returnModel["Diagnostic"]["Error Path"], columns = dic_depData["Array"]["Features"])
dic_returnModel["Diagnostic"]["Used Coef"] = pd.DataFrame(dic_returnModel["Diagnostic"]["Used Coef"], index = dic_indepData["Array"]["Features"], columns = dic_depData["Array"]["Features"])
elif str_modelType == "Tree":
if indep_catDropFirst is None:
indep_catDropFirst = "Binary"
if indep_contNorm is None:
indep_contNorm = "None"
if dep_catDropFirst is None:
dep_catDropFirst = "None"
if dep_contNorm is None:
dep_contNorm = "None"
if intercept is None:
intercept = False
dic_indepData = data_prep(df_dataFrame, lst_indepVars, intercept = intercept, catDropFirst = indep_catDropFirst, contNorm = indep_contNorm)
dic_depData = data_prep(df_dataFrame, lst_depVars, intercept = False, catDropFirst = dep_catDropFirst, contNorm = dep_contNorm)
dic_returnModel = decision_tree(dic_indepData["Array"]["Data"], dic_indepData["Array"]["Features"], dic_indepData["Array"]["Types"], dic_depData["Array"]["Data"], dic_depData["Array"]["Features"], **kwargs)
dic_returnModel["Model"]["Independent"] = dic_indepData
dic_returnModel["Model"]["Dependent"] = dic_depData
return dic_returnModel
#%% Linear Regression Predictor Module
def pred_lin_reg(arr_indepVars, arr_betas):
arr_pred = np.dot(arr_indepVars, arr_betas)
return arr_pred
#%% Logistic Regression Predictor Module
def pred_log_reg(arr_indepVars, arr_betas, arr_features, predType = "cats"):
arr_pred = 1 / (1 + np.exp(-np.dot(arr_indepVars, arr_betas)))
if predType == "cats":
if arr_pred.shape[1] == 1:
arr_pred = np.round(arr_pred).astype(int)
else:
arr_pred = np.argmax(arr_pred, axis = 1)
arr_pred = arr_features[arr_pred].reshape((arr_indepVars.shape[0], 1))
return arr_pred
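#%% Regression Predictor Example (added illustrative sketch)
# Hedged example of the two predictor modules on hand-made inputs: pred_lin_reg is a
# plain dot product, while pred_log_reg maps the scores through a sigmoid and returns
# the winning category label. All values are hypothetical; defined but never invoked.
def _demo_reg_predictors():
    arr_x = np.array([[1.0, 0.2], [1.0, 3.0]])        # intercept + one feature
    arr_b = np.array([[-1.0, 1.0], [0.5, -0.5]])      # features x categories
    arr_linear = pred_lin_reg(arr_x, arr_b)
    arr_labels = pred_log_reg(arr_x, arr_b, np.array(["no", "yes"]))
    return arr_linear, arr_labels                     # labels come back as [["yes"], ["no"]]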
#%% Decision Tree Predictor Walker Module
def pred_tree_walk(arr_xMatrix, lst_xFeatures, lst_yFeatures, dic_currNode, predType = "cats"):
if dic_currNode["Leaf"] == True:
if predType == "probs":
mod_retVal = dic_currNode["Prediction"]["Probabilities"]
else:
str_title = lst_yFeatures[dic_currNode["Prediction"]["Category"]].split("_", 1)[0]
mod_retVal = [lst_yFeatures[dic_currNode["Prediction"]["Category"]].split(str_title + "_", 1)[1]]
return mod_retVal
else:
mod_xFeature = arr_xMatrix[lst_xFeatures.index(dic_currNode["Split Feature"]["Title"])]
if dic_currNode["Split Feature"]["Type"] == "Continuous":
if mod_xFeature >= dic_currNode["Split Feature"]["Break"]:
str_splitIndex = "gte"
else:
str_splitIndex = "lt"
dic_nextNode = dic_currNode["Splits"][dic_currNode["Split Feature"]["Splits"].index(str_splitIndex)]
else:
dic_nextNode = dic_currNode["Splits"][dic_currNode["Split Feature"]["Splits"].index(mod_xFeature)]
return pred_tree_walk(arr_xMatrix, lst_xFeatures, lst_yFeatures, dic_nextNode, predType)
#%% Multi-form Predictor
def model_pred(dic_model, df_dataFrame, **kwargs):
lst_indexGroup = dic_model["Model"]["Dependent"]["Array"]["Groups"][0]
str_label = dic_model["Model"]["Dependent"]["Array"]["Labels"][0]
arr_features = np.empty(0)
for feature in dic_model["Model"]["Dependent"]["Array"]["Features"][lst_indexGroup]:
arr_features = np.append(arr_features, feature.replace(str_label + "_", ""))
lst_missingCols = []
lst_dataCols = df_dataFrame.columns.values
for label in dic_model["Model"]["Independent"]["Array"]["Labels"]:
if label == "Intercept":
continue
elif label not in lst_dataCols:
lst_missingCols.append(label)
if len(lst_missingCols) > 0:
print "Dataframe does not have following columns:", lst_missingCols
return
arr_xVals = dic_model["Model"]["Independent"]["Array"]["Labels"][range(len(dic_model["Model"]["Independent"]["Array"]["Labels"]))]
dic_workingData = data_prep(df_dataFrame, arr_xVals, intercept = dic_model["Model"]["Independent"]["Options"]["Intercept"], interactions = dic_model["Model"]["Independent"]["Options"]["Interactions"], featOrder = dic_model["Model"]["Independent"]["Array"]["Features"], catDropFirst = dic_model["Model"]["Independent"]["Options"]["Categorical"]["Drop First"], contNorm = dic_model["Model"]["Independent"]["Options"]["Continuous"]["Normalization"], contPoly = dic_model["Model"]["Independent"]["Options"]["Continuous"]["Polynomial"])
if dic_model["Model"]["Type"] == "Linear":
arr_pred = pred_lin_reg(dic_workingData["Array"]["Data"], dic_model["Coefficients"].values, **kwargs)
elif dic_model["Model"]["Type"] == "Logistic":
arr_pred = pred_log_reg(dic_workingData["Array"]["Data"], dic_model["Coefficients"].values, arr_features, **kwargs)
elif dic_model["Model"]["Type"] == "Decision Tree":
arr_pred = []
for rowIndex in range(dic_workingData["Array"]["Data"].shape[0]):
lst_predictRats = pred_tree_walk(dic_workingData["Array"]["Data"][rowIndex, :], list(dic_workingData["Array"]["Features"]), list(dic_model["Model"]["Dependent"]["Array"]["Features"]), dic_model["Tree"], **kwargs)
if (len(lst_predictRats) == 2) & (type(lst_predictRats) == list):
arr_pred.append(lst_predictRats[1])
else:
arr_pred.append(lst_predictRats)
arr_pred = np.array(arr_pred)
else:
print "Unrecognised model format"
return
return arr_pred
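# Hedged build/predict sketch (added; frame and column names are hypothetical):
# dic_model = model_build("Logistic", df_train, ["x1", "x2"], "label")
# arr_hat = model_pred(dic_model, df_new)  # df_new must carry the same x columns as df_train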
#%% Imputer Function
def val_imputer(df_dataFrame, lst_variables, method = "ML"):
lst_goodVars = lst_variables[:]
str_method = str(method)
arr_missingCount = np.sum(df_dataFrame[lst_variables].isnull().values, axis = 0)
print tabulate(np.transpose(np.array([lst_variables, arr_missingCount])), headers = ["Variable", "Missing"])
print
if str_method == "MI":
lst_missingVars = []
for missingVar in arr_missingCount.nonzero()[0]:
lst_missingVars.append((lst_variables[missingVar], arr_missingCount[missingVar], df_dataFrame[[lst_variables[missingVar]]].dtypes.values[0]))
lst_goodVars.remove(lst_variables[missingVar])
lst_missingVars.sort(key = lambda x: x[1])
for colTup in lst_missingVars:
print "Imputing for", colTup[0]
lst_indepVars = lst_goodVars[:]
str_depVar = colTup[0]
df_imputeTrain = df_dataFrame[lst_indepVars + [str_depVar]].dropna(axis = 0)
df_imputeActual = df_dataFrame[df_dataFrame[str_depVar].isnull().values][lst_indepVars]
if colTup[2] == object:
str_imputeType = "Logistic"
else:
str_imputeType = "Linear"
dic_imputeModel = model_build(str_imputeType, df_imputeTrain, lst_indepVars, str_depVar)
arr_imputePred = model_pred(dic_imputeModel, df_imputeActual)
df_dataFrame.iloc[df_imputeActual.index, df_dataFrame.columns.get_loc(str_depVar)] = arr_imputePred.flatten()
lst_goodVars.append(str_depVar)
return df_dataFrame
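# Note and hedged usage sketch (added; column names are hypothetical): only the "MI"
# branch actually imputes -- with any other method the function just reports the
# missing counts and returns the frame unchanged.
# df_clean = val_imputer(df_raw, ["age", "income", "grade"], method = "MI")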
#%% Classification Balancer Function
def class_balance(df_dataFrame, str_classVar, groupVar = None, minObs = 0, seedVal = None):
str_groupVar = str(groupVar)
int_minObs = int(minObs)
if seedVal is None:
int_seedVal = np.random.randint(0, sys.maxint)
else:
int_seedVal = int(seedVal)
np.random.seed(int_seedVal)
arr_indexSelect = np.empty(shape = 0)
if groupVar is None:
df_classCounts = df_dataFrame[str_classVar].value_counts()
if np.min(df_classCounts.values) > int_minObs:
int_minObs = np.min(df_classCounts.values)
for className in df_classCounts.index.values:
arr_indexSelect = np.append(arr_indexSelect, np.random.choice(df_dataFrame[df_dataFrame[str_classVar] == className].index.values, int_minObs, replace = False))
else:
for groupName in df_dataFrame[str_groupVar].unique():
df_classCounts = df_dataFrame.loc[df_dataFrame[str_groupVar] == groupName, str_classVar].value_counts()
if np.min(df_classCounts.values) > int_minObs:
int_minObs = np.min(df_classCounts.values)
for className in df_classCounts.index.values:
arr_indexSelect = np.append(arr_indexSelect, np.random.choice(df_dataFrame[(df_dataFrame[str_classVar] == className) & (df_dataFrame[str_groupVar] == groupName)].index.values, int_minObs, replace = False))
df_returnFrame = df_dataFrame.loc[arr_indexSelect, :]
dic_returnObj = {
"Original Data": df_dataFrame,
"Balanced Data": df_returnFrame,
"Random Seed": int_seedVal
}
return dic_returnObj
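# Hedged usage sketch (added; column names are hypothetical):
# dic_bal = class_balance(df_raw, "label", groupVar = "month", seedVal = 7)
# df_balanced = dic_bal["Balanced Data"]  # omit groupVar to balance over the whole frame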
#%% Confusion Entropy Measure Function
def conf_entropy(arr_confMatrix):
int_matrixSize = arr_confMatrix.shape[1]
arr_probMatrix = np.array([np.nan] * (int_matrixSize ** 2), dtype = float).reshape((int_matrixSize, int_matrixSize))
for predItem in range(int_matrixSize):
flt_predDenom = float(arr_confMatrix[predItem, :].sum()) + float(arr_confMatrix[:, predItem].sum())
for actItem in range(int_matrixSize):
if actItem != predItem:
arr_probMatrix[actItem, predItem] = arr_confMatrix[actItem, predItem] / flt_predDenom
arr_entropyDud = arr_probMatrix.flatten()
for entItem in range(int_matrixSize ** 2):
if arr_entropyDud[entItem] != 0:
arr_entropyDud[entItem] = -arr_entropyDud[entItem] * log(arr_entropyDud[entItem], (int_matrixSize - 1) * 2)
arr_entropyDud = np.nan_to_num(arr_entropyDud.reshape((int_matrixSize, int_matrixSize)))
arr_entropyVal = np.empty(0)
arr_entropyWeight = np.empty(0)
for predItem in range(int_matrixSize):
arr_entropyVal = np.append(arr_entropyVal, arr_entropyDud[:, predItem].sum() + arr_entropyDud[predItem, :].sum())
arr_entropyWeight = np.append(arr_entropyWeight, (arr_confMatrix[:, predItem].sum() + arr_confMatrix[predItem, :].sum()) / (arr_confMatrix.sum() * 2.0))
flt_totalEntropy = np.sum(arr_entropyVal * arr_entropyWeight)
return flt_totalEntropy
#%% Category Error Rate
def class_error(arr_confMatrix):
int_matrixSize = arr_confMatrix.shape[1]
arr_actErr = np.empty(0, dtype = float)
arr_actTotal = np.sum(arr_confMatrix, axis = 1)
for actItem in range(int_matrixSize):
int_currCorrect = arr_confMatrix[actItem, actItem]
arr_actErr = np.append(arr_actErr, float(int_currCorrect) / arr_actTotal[actItem])
flt_overallErr = 1 - np.prod(arr_actErr)
return flt_overallErr
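#%% Classification Diagnostics Example (added illustrative sketch)
# Hedged example on a hand-made 2x2 confusion matrix (rows = actual, columns = predicted):
# conf_entropy returns the confusion-entropy measure and class_error returns one minus the
# product of the per-class recalls. Relies on the same numpy and math.log imports as the
# functions above; defined but never invoked here.
def _demo_classification_metrics():
    arr_conf = np.array([[8, 2], [1, 9]], dtype = float)
    return conf_entropy(arr_conf), class_error(arr_conf)  # class_error here is 1 - 0.8 * 0.9 = 0.28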
#%% Model Validator Function
def model_validator(str_modelType, df_trainData, df_validData, lst_indepVars, lst_depVars, sequence = ["poly", "l2", "l1"], maxPoly = 20, l1Net = 10, l2Net = 10, **kwargs):
flt_bestL1 = 0.0
flt_bestL2 = 0.0
arr_actualPolys = np.array([1] * len(lst_indepVars))
dic_bestModel = model_build(str_modelType, df_trainData, lst_indepVars, lst_depVars, **kwargs)
arr_actY = df_validData[lst_depVars].values
arr_predY = model_pred(dic_bestModel, df_validData).flatten()
flt_acc = np.mean(arr_actY == arr_predY)
flt_ent = conf_entropy(skmet.confusion_matrix(arr_actY, arr_predY))
flt_classErr = class_error(skmet.confusion_matrix(arr_actY, arr_predY))
flt_bestErr = flt_classErr
print "Model: Baseline - Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_bestErr, 3)
for seq in sequence:
if seq == "poly":
arr_usePolys = np.arange(2, maxPoly + 1, 1)
flt_oldErr = flt_bestErr
for indepIndex in range(len(lst_indepVars)):
mod_currDType = df_trainData[lst_indepVars].dtypes.values[indepIndex]
if np.issubdtype(mod_currDType, int) | np.issubdtype(mod_currDType, float):
flt_bestErr = flt_oldErr
arr_testPolys = np.array([1] * len(lst_indepVars))
int_bestPolyIndep = 1
for poly in arr_usePolys:
arr_testPolys[indepIndex] = poly
dic_useModel = model_build(str_modelType, df_trainData, lst_indepVars, lst_depVars, indep_contPoly = arr_testPolys, l1Weight = flt_bestL1, l2Weight = flt_bestL2, **kwargs)
arr_actY = df_validData[lst_depVars].values
arr_predY = model_pred(dic_useModel, df_validData).flatten()
flt_acc = np.mean(arr_actY == arr_predY)
flt_ent = conf_entropy(skmet.confusion_matrix(arr_actY, arr_predY))
flt_classErr = class_error(skmet.confusion_matrix(arr_actY, arr_predY))
flt_err = flt_classErr
if flt_err < flt_bestErr:
dic_bestModel = dic_useModel
int_bestPolyIndep = poly
flt_bestErr = flt_err
print "Model: Polynomial", lst_indepVars[indepIndex], "^", poly, "- Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_err, 3), "- Replacing"
else:
print "Model: Polynomial", lst_indepVars[indepIndex], "^", poly, "- Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_err, 3)
arr_actualPolys[indepIndex] = int_bestPolyIndep
dic_bestModel = model_build(str_modelType, df_trainData, lst_indepVars, lst_depVars, indep_contPoly = arr_actualPolys, l1Weight = flt_bestL1, l2Weight = flt_bestL2, **kwargs)
arr_actY = df_validData[lst_depVars].values
arr_predY = model_pred(dic_bestModel, df_validData).flatten()
flt_acc = np.mean(arr_actY == arr_predY)
flt_ent = conf_entropy(skmet.confusion_matrix(arr_actY, arr_predY))
flt_classErr = class_error(skmet.confusion_matrix(arr_actY, arr_predY))
flt_bestErr = flt_classErr
print "Model: Final Polynomial - Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_bestErr, 3)
elif seq == "l2":
arr_l2Range = np.array([10 ** x for x in range(-l2Net, l2Net)])
for l2 in arr_l2Range:
dic_useModel = model_build(str_modelType, df_trainData, lst_indepVars, lst_depVars, indep_contPoly = arr_actualPolys, l1Weight = flt_bestL1, l2Weight = l2, **kwargs)
arr_actY = df_validData[lst_depVars].values
arr_predY = model_pred(dic_useModel, df_validData).flatten()
flt_acc = np.mean(arr_actY == arr_predY)
flt_ent = conf_entropy(skmet.confusion_matrix(arr_actY, arr_predY))
flt_classErr = class_error(skmet.confusion_matrix(arr_actY, arr_predY))
flt_err = flt_classErr
if flt_err < flt_bestErr:
dic_bestModel = dic_useModel
flt_bestL2 = l2
flt_bestErr = flt_err
print "Model: L2", l2, "- Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_err, 3), "- Replacing"
else:
print "Model: L2", l2, "- Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_err, 3)
if flt_bestL2 != 0.0:
flt_l2Base = int(np.log10(flt_bestL2))
log_contL2 = True
while log_contL2:
log_contL2 = False
arr_iterVals = np.arange(flt_bestL2 - (10 ** (flt_l2Base + 1)), flt_bestL2 + (10 ** (flt_l2Base + 1)), 10 ** flt_l2Base)
arr_iterVals = arr_iterVals[arr_iterVals > 0]
for l2 in arr_iterVals:
dic_useModel = model_build(str_modelType, df_trainData, lst_indepVars, lst_depVars, indep_contPoly = arr_actualPolys, l1Weight = flt_bestL1, l2Weight = l2, **kwargs)
arr_actY = df_validData[lst_depVars].values
arr_predY = model_pred(dic_useModel, df_validData).flatten()
flt_acc = np.mean(arr_actY == arr_predY)
flt_ent = conf_entropy(skmet.confusion_matrix(arr_actY, arr_predY))
flt_classErr = class_error(skmet.confusion_matrix(arr_actY, arr_predY))
flt_err = flt_classErr
if flt_err <= flt_bestErr:
dic_bestModel = dic_useModel
flt_bestL2 = l2
flt_bestErr = flt_err
print "Model: L2", l2, "- Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_err, 3), "- Replacing"
log_contL2 = True
else:
print "Model: L2", l2, "- Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_err, 3)
if flt_err == flt_bestErr:
log_contL2 = False
flt_l2Base -= 1
elif seq == "l1":
arr_l1Range = np.array([10 ** x for x in range(-l1Net, l1Net)])
for l1 in arr_l1Range:
dic_useModel = model_build(str_modelType, df_trainData, lst_indepVars, lst_depVars, indep_contPoly = arr_actualPolys, l1Weight = l1, l2Weight = flt_bestL2, **kwargs)
arr_actY = df_validData[lst_depVars].values
arr_predY = model_pred(dic_useModel, df_validData).flatten()
flt_acc = np.mean(arr_actY == arr_predY)
flt_ent = conf_entropy(skmet.confusion_matrix(arr_actY, arr_predY))
flt_classErr = class_error(skmet.confusion_matrix(arr_actY, arr_predY))
flt_err = flt_classErr
if flt_err < flt_bestErr:
dic_bestModel = dic_useModel
flt_bestL1 = l1
flt_bestErr = flt_err
print "Model: L1", l1, "- Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_err, 3), "- Replacing"
else:
print "Model: L1", l1, "- Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_err, 3)
if flt_bestL1 != 0.0:
flt_l1Base = int(np.log10(flt_bestL1))
log_contL1 = True
while log_contL1:
log_contL1 = False
arr_iterVals = np.arange(flt_bestL1 - (10 ** (flt_l1Base + 1)), flt_bestL1 + (10 ** (flt_l1Base + 1)), 10 ** flt_l1Base)
arr_iterVals = arr_iterVals[arr_iterVals > 0]
for l1 in arr_iterVals:
dic_useModel = model_build(str_modelType, df_trainData, lst_indepVars, lst_depVars, indep_contPoly = arr_actualPolys, l1Weight = l1, l2Weight = flt_bestL2, **kwargs)
arr_actY = df_validData[lst_depVars].values
arr_predY = model_pred(dic_useModel, df_validData).flatten()
flt_acc = np.mean(arr_actY == arr_predY)
flt_ent = conf_entropy(skmet.confusion_matrix(arr_actY, arr_predY))
flt_classErr = class_error(skmet.confusion_matrix(arr_actY, arr_predY))
flt_err = flt_classErr
if flt_err <= flt_bestErr:
dic_bestModel = dic_useModel
flt_bestL1 = l1
flt_bestErr = flt_err
print "Model: L1", l1, "- Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_err, 3), "- Replacing"
log_contL1 = True
else:
print "Model: L1", l1, "- Entropy", round(flt_ent, 3), "- Accuracy", round(flt_acc, 3), "- Class Error", round(flt_err, 3)
if flt_err == flt_bestErr:
log_contL1 = False
flt_l1Base -= 1
print "Model: L2", round(flt_bestL2, 3), "- L1", round(flt_bestL1, 3), "- Class Error", round(flt_bestErr, 3)
return dic_bestModel
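# Hedged usage sketch (added; frame and column names are hypothetical):
# dic_best = model_validator("Logistic", df_train, df_valid, ["x1", "x2"], "label",
#                            sequence = ["l2", "l1"], l1Net = 3, l2Net = 3)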
#%%
modelData = pd.read_csv("modelData.csv")
predModel = model_build("Tree", modelData, ["CurrEL", "RecDays", "CurrAmnt", "CurrLevel"], "NextLevel")
|
gpl-3.0
|
mne-tools/mne-python
|
examples/time_frequency/source_space_time_frequency.py
|
20
|
2307
|
"""
===================================================
Compute induced power in the source space with dSPM
===================================================
Returns STC files, i.e. source estimates of induced power
for different frequency bands in the source space. The inverse method
is linear, based on the dSPM inverse operator.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_band_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax, event_id = -0.2, 0.5, 1
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
events = events[:10] # take 10 events to keep the computation time low
# Use linear detrend to reduce any edge artifacts
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
preload=True, detrend=1)
# Compute a source estimate per frequency band
bands = dict(alpha=[9, 11], beta=[18, 22])
stcs = source_band_induced_power(epochs, inverse_operator, bands, n_cycles=2,
use_fft=False, n_jobs=1)
for b, stc in stcs.items():
stc.save('induced_power_%s' % b)
###############################################################################
# plot mean power
plt.plot(stcs['alpha'].times, stcs['alpha'].data.mean(axis=0), label='Alpha')
plt.plot(stcs['beta'].times, stcs['beta'].data.mean(axis=0), label='Beta')
plt.xlabel('Time (ms)')
plt.ylabel('Power')
plt.legend()
plt.title('Mean source induced power')
plt.show()
|
bsd-3-clause
|
ezekial4/atomic_neu
|
atomic/abundance.py
|
1
|
3875
|
import numpy as np
class FractionalAbundance(object):
"""
An array of ionisation stage fractions over density and/or temperature.
Attributes:
atomic_data (AtomicData): used by Radiation class
to get various coefficients.
y (np.array, 2D): stores the fractional abundance of each
ionisation stage, k=0 to Z, for each point
of the given temperatures and densities.
Shape is (Z+1, x).
temperature (array_like): list of temperatures [eV]
density (array_like): list of densities [m^-3]
"""
def __init__(self, atomic_data, y, temperature, density):
self.atomic_data = atomic_data
self.y = y # fractional abundances of each charge state
self.temperature = temperature
self.density = density
def mean_charge(self):
"""
Compute the mean charge:
<Z> = sum_k ( y_k * k )
Assumes that y is a 2D array with shape
(nuclear_charge+1,# of temperatures)
Returns:
An np.array of mean charge.
"""
k = np.arange(self.y.shape[0])
k = k[:, np.newaxis]
z_mean = np.sum(self.y * k, axis=0)
return z_mean
def effective_charge(self, impurity_fraction, ion_density=None):
"""
Compute the effective charge:
n_i + n_I <Z**2>
Z_eff = ----------------
n_e
using the approximation <Z**2> = <Z>**2.
Assumes the main ion has charge +1.
Returns:
An np.array of Zeff
"""
if ion_density is None:
ion_density = self.density
impurity_density = impurity_fraction * self.density
z_mean = self.mean_charge()
zeff = (ion_density + impurity_density * z_mean**2) / self.density
return zeff
def plot_vs_temperature(self, **kwargs):
"""
Use Matplotlib to plot the abundance of each stage at each point.
If the points all have the same temperature but different density,
this won't work.
"""
import matplotlib.pyplot as plt
ax = kwargs.pop('ax', plt.gca())
lines = ax.loglog(self.temperature, self.y.T, **kwargs)
ax.set_xlabel('$T_\mathrm{e}\ [\mathrm{eV}]$')
ax.set_ylim(0.05, 1.3)
self.annotate_ionisation_stages(lines)
plt.draw_if_interactive()
return lines
def annotate_ionisation_stages(self, lines):
for i, l in enumerate(lines):
x = l.get_xdata()
y = l.get_ydata()
ax = l.axes
maxpos = y.argmax()
xy = x[maxpos], y[maxpos]
xy = self._reposition_annotation(ax, xy)
s = '$%d^+$' % (i,)
ax.annotate(s, xy, ha='left', va='bottom', color=l.get_color())
def _reposition_annotation(self, ax, xy):
xy_fig = ax.transData.transform_point(xy)
xl, yl = ax.transAxes.inverted().transform(xy_fig)
min_x, max_x = 0.01, 0.95
if xl < min_x:
xy = ax.transAxes.transform_point((min_x, yl))
xy = ax.transData.inverted().transform_point(xy)
if xl > max_x:
xy = ax.transAxes.transform_point((max_x, yl))
xy = ax.transData.inverted().transform_point(xy)
return xy
def replot_colored(self, line, lines_ref):
"""
Replot a line, colored according the most abundant state.
"""
ax = line.axes
x = line.get_xdata()
y = line.get_ydata()
lines = []
imax = np.argmax(self.y, axis=0)
for i, line_ref in enumerate(lines_ref):
mask = imax == i
lines.append(ax.plot(x[mask], y[mask], color=line_ref.get_color()))
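# --- Illustrative usage (added sketch, not part of the original module) ---------------
# Hand-built fractional abundances for a Z=2 species at two temperature points.
# atomic_data is only needed by the Radiation class, so None is passed here purely for
# demonstration; all numbers are hypothetical. Defined but never invoked here.
def _demo_fractional_abundance():
    te = np.array([10.0, 100.0])   # eV
    ne = np.array([1e19, 1e19])    # m^-3
    y = np.array([[0.2, 0.0],      # neutral
                  [0.8, 0.1],      # 1+
                  [0.0, 0.9]])     # 2+
    fa = FractionalAbundance(None, y, te, ne)
    return fa.mean_charge(), fa.effective_charge(impurity_fraction=0.01)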
|
mit
|
keflavich/pyradex
|
examples/h2co_grids.py
|
1
|
4376
|
import pyradex
import pylab as pl
import numpy as np
import matplotlib
ntemp,ndens = 20,20
temperatures = np.linspace(10,50,ntemp)
densities = np.logspace(2.5,7,ndens)
abundance = 10**-8.5
opr = 0.01 # assume primarily para
fortho = opr/(1+opr)
taugrid_6 = np.empty([ndens,ntemp])
texgrid_6 = np.empty([ndens,ntemp])
fluxgrid_6 = np.empty([ndens,ntemp])
taugrid_140 = np.empty([ndens,ntemp])
texgrid_140 = np.empty([ndens,ntemp])
fluxgrid_140 = np.empty([ndens,ntemp])
taugrid_150 = np.empty([ndens,ntemp])
texgrid_150 = np.empty([ndens,ntemp])
fluxgrid_150 = np.empty([ndens,ntemp])
columngrid = np.empty([ndens,ntemp])
import os
if not os.path.exists('oh2co-h2.dat'):
import urllib
urllib.urlretrieve('http://home.strw.leidenuniv.nl/~moldata/datafiles/oh2co-h2.dat', 'oh2co-h2.dat')
R = pyradex.Radex(species='oh2co-h2', abundance=abundance)
R.run_radex()
# get the table so we can look at the frequency grid
table = R.get_table()
# Target frequencies:
table[np.array([0,1,3])].pprint()
for ii,tt in enumerate(temperatures):
R.temperature = tt
for jj,dd in enumerate(densities):
R.density = {'oH2':dd*fortho,'pH2':dd*(1-fortho)}
R.abundance = abundance # reset column to the appropriate value
R.run_radex(reuse_last=False, reload_molfile=True)
TI = R.source_brightness
taugrid_6[jj,ii] = R.tau[0]
texgrid_6[jj,ii] = R.tex[0].value
fluxgrid_6[jj,ii] = TI[0].value
taugrid_140[jj,ii] = R.tau[1]
texgrid_140[jj,ii] = R.tex[1].value
fluxgrid_140[jj,ii] = TI[1].value
taugrid_150[jj,ii] = R.tau[3]
texgrid_150[jj,ii] = R.tex[3].value
fluxgrid_150[jj,ii] = TI[3].value
columngrid[jj,ii] = R.column.value
pl.figure(1)
pl.clf()
extent = [densities.min(),densities.max(),temperatures.min(),temperatures.max()]
for kk,(grid,freq,label) in enumerate(zip([taugrid_140,taugrid_150,texgrid_140,texgrid_150],['140','150','140','150'],[r'\tau',r'\tau','T_{ex}','T_{ex}'])):
ax = pl.subplot(2,2,kk+1)
#ax.imshow(grid, extent=extent)
ax.pcolormesh(np.log10(densities),temperatures,grid)
ax.set_title('$%s$ o-H$_2$CO %s GHz' % (label,freq))
ax.set_xlabel('log Density')
ax.set_ylabel('Temperature')
pl.figure(2)
pl.clf()
ax = pl.subplot(2,1,1)
#ax.imshow(grid, extent=extent)
cax = ax.pcolormesh(np.log10(densities),temperatures,taugrid_140/taugrid_150, vmax=1.3, vmin=0.8)
pl.colorbar(cax)
ax.set_title('$\\tau$ o-H$_2$CO 140/150 GHz')
ax.set_xlabel('log Density')
ax.set_ylabel('Temperature')
ax = pl.subplot(2,1,2)
#ax.imshow(grid, extent=extent)
cax = ax.pcolormesh(np.log10(densities),temperatures,texgrid_140/texgrid_150, vmax=1.3, vmin=0.8)
pl.colorbar(cax)
ax.set_title('$T_{ex}$ o-H$_2$CO 140/150 GHz')
ax.set_xlabel('log Density')
ax.set_ylabel('Temperature')
pl.figure(3)
pl.clf()
ax = pl.subplot(2,1,1)
#ax.imshow(grid, extent=extent)
cax = ax.pcolormesh(np.log10(densities),temperatures,taugrid_6/taugrid_150, vmax=0.5, vmin=0.01)
pl.colorbar(cax)
ax.set_title('$\\tau$ o-H$_2$CO 6/150 GHz')
ax.set_xlabel('log Density')
ax.set_ylabel('Temperature')
ax = pl.subplot(2,1,2)
#ax.imshow(grid, extent=extent)
cax = ax.pcolormesh(np.log10(densities),temperatures,texgrid_6/texgrid_150, vmax=2, vmin=0.1)
pl.colorbar(cax)
ax.set_title('$T_{ex}$ o-H$_2$CO 6/150 GHz')
ax.set_xlabel('log Density')
ax.set_ylabel('Temperature')
pl.figure(4)
pl.clf()
extent = [densities.min(),densities.max(),temperatures.min(),temperatures.max()]
for kk,freq in enumerate([6,140,150]):
ax = pl.subplot(2,2,kk+1)
grid = eval('fluxgrid_%i' % freq)
#ax.imshow(grid, extent=extent)
ax.pcolormesh(np.log10(densities),temperatures,grid)
ax.set_title('Flux o-H$_2$CO %i GHz' % (freq))
ax.set_xlabel('log Density')
ax.set_ylabel('Temperature')
pl.figure(6)
pl.clf()
ax = pl.subplot(2,1,1)
#ax.imshow(grid, extent=extent)
cax = ax.pcolormesh(np.log10(densities),temperatures,fluxgrid_6/fluxgrid_150, vmax=0.01, vmin=0)
pl.colorbar(cax)
ax.set_title('$S_{\\nu}$ o-H$_2$CO 6/150 GHz')
ax.set_xlabel('log Density')
ax.set_ylabel('Temperature')
ax = pl.subplot(2,1,2)
#ax.imshow(grid, extent=extent)
cax = ax.pcolormesh(np.log10(densities),temperatures,fluxgrid_140/fluxgrid_150, vmax=1.2, vmin=0.8)
pl.colorbar(cax)
ax.set_title('$S_{\\nu}$ o-H$_2$CO 140/150 GHz')
ax.set_xlabel('log Density')
ax.set_ylabel('Temperature')
pl.show()
|
bsd-3-clause
|
elijah513/scikit-learn
|
doc/sphinxext/numpy_ext/docscrape_sphinx.py
|
408
|
8061
|
import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
|
bsd-3-clause
|
brchiu/tensorflow
|
tensorflow/contrib/metrics/python/ops/metric_ops.py
|
5
|
178812
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains metric-computing operations on streamed tensors.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.distributions.normal import Normal
from tensorflow.python.util.deprecation import deprecated
# Epsilon constant used to represent extremely small quantity.
_EPSILON = 1e-7
def _safe_div(numerator, denominator):
"""Computes a safe divide which returns 0 if the denominator is zero.
Note that the function contains an additional conditional check that is
necessary for avoiding situations where the loss is zero causing NaNs to
creep into the gradient computation.
Args:
numerator: An arbitrary `Tensor`.
denominator: A `Tensor` whose shape matches `numerator` and whose values are
assumed to be non-negative.
Returns:
The element-wise value of the numerator divided by the denominator.
"""
if compat.forward_compatible(2018, 11, 1):
return math_ops.div_no_nan(numerator, denominator)
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.div(numerator,
array_ops.where(
math_ops.equal(denominator, 0),
array_ops.ones_like(denominator), denominator)),
array_ops.zeros_like(numerator))
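# A minimal numeric sketch of the behavior above (values as they would
# evaluate, e.g. in a session): zero denominators yield 0 instead of NaN/Inf.
#
#   _safe_div(array_ops.constant([1., 2., 3.]),
#             array_ops.constant([2., 0., 3.]))
#   # -> [0.5, 0.0, 1.0]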
@deprecated(None, 'Please switch to tf.metrics.true_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
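# A minimal usage sketch for the (value_tensor, update_op) pair returned above;
# the same pattern applies to the streaming_true_negatives,
# streaming_false_positives and streaming_false_negatives wrappers below. The
# TF1-style session loop and the names `tf`, `sess`, `pred_t`, `label_t` and
# `num_eval_batches` are placeholders, not part of this module.
#
#   tp, tp_update = streaming_true_positives(predictions=pred_t, labels=label_t)
#   sess.run(tf.local_variables_initializer())
#   for _ in range(num_eval_batches):
#     sess.run(tp_update)   # accumulates the weights of true positives
#   print(sess.run(tp))     # current accumulated total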
@deprecated(None, 'Please switch to tf.metrics.true_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.false_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.false_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.mean')
def streaming_mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the (weighted) mean of the given values.
The `streaming_mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
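# A small worked example of the total/count bookkeeping described above, shown
# as plain arithmetic (the batch values and weights are made up):
#   batch 1: values = [1., 3.], weights = [1., 1.]  ->  total = 4.,  count = 2.
#   batch 2: values = [5.],     weights = [2.]      ->  total = 14., count = 4.
#   mean = total / count = 3.5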
@deprecated(None, 'Please switch to tf.metrics.mean_tensor')
def streaming_mean_tensor(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `streaming_mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `streaming_mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean_tensor(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.accuracy. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_accuracy(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates how often `predictions` matches `labels`.
The `streaming_accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of any shape.
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.accuracy(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
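# A small worked example of the is_correct/weights bookkeeping described above
# (the numbers are made up):
#   predictions = [1, 0, 1, 1], labels = [1, 1, 1, 0], weights = [1, 1, 1, 0]
#   is_correct  = [1., 0., 1., 0.]
#   total += sum(is_correct * weights) = 2.;  count += sum(weights) = 3.
#   accuracy = total / count = 2/3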
@deprecated(None, 'Please switch to tf.metrics.precision. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_precision(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `streaming_precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.recall. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_recall(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `streaming_recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false positive rate of predictions with respect to labels.
The `false_positive_rate` function creates two local variables,
`false_positives` and `true_negatives`, that are used to compute the
false positive rate. This value is ultimately returned as
`false_positive_rate`, an idempotent operation that simply divides
`false_positives` by the sum of `false_positives` and `true_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: Scalar float `Tensor` with the value of
`false_positives` divided by the sum of `false_positives` and
`true_negatives`.
update_op: `Operation` that increments `false_positives` and
`true_negatives` variables appropriately and whose value matches
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_p, false_positives_update_op = metrics.false_positives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_n, true_negatives_update_op = metrics.true_negatives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fpr(fp, tn, name):
return array_ops.where(
math_ops.greater(fp + tn, 0), math_ops.div(fp, fp + tn), 0, name)
fpr = compute_fpr(false_p, true_n, 'value')
update_op = compute_fpr(false_positives_update_op, true_negatives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
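# A small worked example of the ratio computed above (the counts are made up):
#   false_positives = 5., true_negatives = 20.
#   false_positive_rate = 5. / (5. + 20.) = 0.2
# When fp + tn == 0, the compute_fpr helper above returns 0 instead of NaN.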
def streaming_false_negative_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false negative rate of predictions with respect to labels.
The `false_negative_rate` function creates two local variables,
`false_negatives` and `true_positives`, that are used to compute the
false positive rate. This value is ultimately returned as
`false_negative_rate`, an idempotent operation that simply divides
`false_negatives` by the sum of `false_negatives` and `true_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_negative_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: Scalar float `Tensor` with the value of
`false_negatives` divided by the sum of `false_negatives` and
`true_positives`.
update_op: `Operation` that increments `false_negatives` and
`true_positives` variables appropriately and whose value matches
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_n, false_negatives_update_op = metrics.false_negatives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_p, true_positives_update_op = metrics.true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fnr(fn, tp, name):
return array_ops.where(
math_ops.greater(fn + tp, 0), math_ops.div(fn, fn + tp), 0, name)
fnr = compute_fnr(false_n, true_p, 'value')
update_op = compute_fnr(false_negatives_update_op, true_positives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _streaming_confusion_matrix_at_thresholds(predictions,
labels,
thresholds,
weights=None,
includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
`true_positive[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
to `bool`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
includes: Tuple of keys to return, from 'tp', 'fn', 'tn', 'fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
raise ValueError('Invalid key: %s.' % include)
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
broadcast_weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_positives')
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(true_positives,
math_ops.reduce_sum(
is_true_positive, 1))
values['tp'] = true_positives
if 'fn' in includes:
false_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_negatives')
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(false_negatives,
math_ops.reduce_sum(
is_false_negative, 1))
values['fn'] = false_negatives
if 'tn' in includes:
true_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_negatives')
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(true_negatives,
math_ops.reduce_sum(
is_true_negative, 1))
values['tn'] = true_negatives
if 'fp' in includes:
false_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_positives')
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(false_positives,
math_ops.reduce_sum(
is_false_positive, 1))
values['fp'] = false_positives
return values, update_ops
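# A small worked example of the threshold-tiling trick used above (the numbers
# are made up). With thresholds = [0.25, 0.75] and predictions = [0.1, 0.6, 0.9]:
#   thresh_tiled = [[0.25, 0.25, 0.25],
#                   [0.75, 0.75, 0.75]]
#   pred_is_pos  = [[False, True,  True ],   # predictions > 0.25
#                   [False, False, True ]]   # predictions > 0.75
# Each row is then combined with the tiled labels and reduced along axis 1,
# giving the tp/fn/tn/fp count for the corresponding threshold.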
def streaming_true_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tp',))
return values['tp'], update_ops['tp']
def streaming_false_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fn',))
return values['fn'], update_ops['fn']
def streaming_false_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fp',))
return values['fp'], update_ops['fp']
def streaming_true_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tn',))
return values['tn'], update_ops['tn']
def streaming_curve_points(labels=None,
predictions=None,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes curve (ROC or PR) values for a prespecified number of points.
The `streaming_curve_points` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
that are used to compute the curve values. To discretize the curve, a linearly
spaced set of thresholds is used to compute pairs of recall and precision
values.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
points: A `Tensor` with shape [num_thresholds, 2] that contains points of
the curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
TODO(chizeng): Consider rewriting this method to make use of logic within the
precision_recall_at_equal_thresholds method (to improve run time).
"""
with variable_scope.variable_scope(name, 'curve_points',
(labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
kepsilon = _EPSILON # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def compute_points(tp, fn, tn, fp):
"""Computes the roc-auc or pr-auc based on confusion counts."""
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
return fp_rate, rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
return rec, prec
xs, ys = compute_points(values['tp'], values['fn'], values['tn'],
values['fp'])
points = array_ops.stack([xs, ys], axis=1)
update_op = control_flow_ops.group(*update_ops.values())
if metrics_collections:
ops.add_to_collections(metrics_collections, points)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return points, update_op
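# A small sketch of how to read the returned tensor (no real data implied; the
# names pred_t and label_t are placeholders):
#
#   points, update_op = streaming_curve_points(
#       labels=label_t, predictions=pred_t, curve='ROC')
#   # points[i] is (false_positive_rate, recall) at thresholds[i] for 'ROC';
#   # for curve='PR' it is (recall, precision) instead.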
@deprecated(None, 'Please switch to tf.metrics.auc. Note that the order of '
'the labels and predictions arguments has been switched.')
def streaming_auc(predictions,
labels,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes the approximate AUC via a Riemann sum.
The `streaming_auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
values by the false positive rate, while the area under the PR-curve is
computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.auc(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
num_thresholds=num_thresholds,
curve=curve,
updates_collections=updates_collections,
name=name)
def _compute_dynamic_auc(labels, predictions, curve='ROC', weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This could be slow for large batches, but has the advantage of not
having its results degrade depending on the distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
curve: The name of the curve to be computed, 'ROC' for the Receiving
Operating Characteristic or 'PR' for the Precision-Recall curve.
weights: A 1-D `Tensor` of weights whose values are `float64`.
Returns:
A scalar `Tensor` containing the area-under-curve value for the input.
"""
# Compute the total weight and the total positive weight.
size = array_ops.size(predictions)
if weights is None:
weights = array_ops.ones_like(labels, dtype=dtypes.float64)
labels, predictions, weights = metrics_impl._remove_squeezable_dimensions(
labels, predictions, weights)
total_weight = math_ops.reduce_sum(weights)
total_positive = math_ops.reduce_sum(
array_ops.where(
math_ops.greater(labels, 0), weights,
array_ops.zeros_like(labels, dtype=dtypes.float64)))
def continue_computing_dynamic_auc():
"""Continues dynamic auc computation, entered if labels are not all equal.
Returns:
A scalar `Tensor` containing the area-under-curve value.
"""
# Sort the predictions descending, keeping the same order for the
# corresponding labels and weights.
ordered_predictions, indices = nn.top_k(predictions, k=size)
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# Get the counts of the unique ordered predictions.
_, _, counts = array_ops.unique_with_counts(ordered_predictions)
# Compute the indices of the split points between different predictions.
splits = math_ops.cast(
array_ops.pad(math_ops.cumsum(counts), paddings=[[1, 0]]), dtypes.int32)
# Count the positives to the left of the split indices.
true_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.greater(ordered_labels, 0), ordered_weights,
array_ops.zeros_like(ordered_labels,
dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
if curve == 'ROC':
# Compute the weight of the negatives to the left of every split point and
# the total weight of the negatives, for computing the FPR.
false_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.less(ordered_labels, 1), ordered_weights,
array_ops.zeros_like(
ordered_labels, dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
total_negative = total_weight - total_positive
x_axis_values = math_ops.truediv(false_positives, total_negative)
y_axis_values = math_ops.truediv(true_positives, total_positive)
elif curve == 'PR':
x_axis_values = math_ops.truediv(true_positives, total_positive)
# For conformance, set precision to 1 when the number of positive
# classifications is 0.
positives = array_ops.gather(
array_ops.pad(math_ops.cumsum(ordered_weights), paddings=[[1, 0]]),
splits)
y_axis_values = array_ops.where(
math_ops.greater(splits, 0),
math_ops.truediv(true_positives, positives),
array_ops.ones_like(true_positives, dtype=dtypes.float64))
# Calculate trapezoid areas.
heights = math_ops.add(y_axis_values[1:], y_axis_values[:-1]) / 2.0
widths = math_ops.abs(
math_ops.subtract(x_axis_values[1:], x_axis_values[:-1]))
return math_ops.reduce_sum(math_ops.multiply(heights, widths))
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
return control_flow_ops.cond(
math_ops.logical_or(
math_ops.equal(total_positive, 0), math_ops.equal(
total_positive, total_weight)),
true_fn=lambda: array_ops.constant(0, dtypes.float64),
false_fn=continue_computing_dynamic_auc)
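# A small worked trace of the trapezoid accumulation above for a perfect
# ranking, assuming predictions = [0.9, 0.8, 0.2, 0.1], labels = [1, 1, 0, 0],
# unit weights and curve='ROC':
#   x (FPR) = [0.0, 0.0, 0.0, 0.5, 1.0]
#   y (TPR) = [0.0, 0.5, 1.0, 1.0, 1.0]
#   heights = [0.25, 0.75, 1.0, 1.0]
#   widths  = [0.0,  0.0,  0.5, 0.5]
#   AUC     = sum(heights * widths) = 1.0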
def streaming_dynamic_auc(labels,
predictions,
curve='ROC',
metrics_collections=(),
updates_collections=(),
name=None,
weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This has the advantage of being resilient to the distribution of
predictions by aggregating across batches, accumulating labels and predictions
and performing the final calculation using all of the concatenated values.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` and with values of 0 or 1 that are castable to `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
curve: The name of the curve for which to compute AUC, 'ROC' for the
Receiving Operating Characteristic or 'PR' for the Precision-Recall curve.
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
weights: A 'Tensor' of non-negative weights whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
Returns:
auc: A scalar `Tensor` containing the current area-under-curve value.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels` and `predictions` have mismatched shapes or if
`curve` isn't a recognized curve type.
"""
if curve not in ['PR', 'ROC']:
raise ValueError('curve must be either ROC or PR, %s unknown' % curve)
with variable_scope.variable_scope(name, default_name='dynamic_auc'):
labels.get_shape().assert_is_compatible_with(predictions.get_shape())
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(
labels, name='concat_labels')
if weights is not None:
weights = array_ops.reshape(
math_ops.cast(weights, dtypes.float64), [-1])
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op = control_flow_ops.group(update_labels, update_preds,
update_weights)
else:
weights_accum = None
update_op = control_flow_ops.group(update_labels, update_preds)
auc = _compute_dynamic_auc(
labels_accum, preds_accum, curve=curve, weights=weights_accum)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
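# A minimal usage sketch; because every prediction and label is concatenated
# into local variables, the whole evaluation set must fit in memory. The names
# `tf`, `sess`, `pred_t`, `label_t` and `num_eval_batches` are placeholders.
#
#   auc, update_op = streaming_dynamic_auc(labels=label_t, predictions=pred_t)
#   sess.run(tf.local_variables_initializer())
#   for _ in range(num_eval_batches):
#     sess.run(update_op)   # appends this batch to the accumulators
#   print(sess.run(auc))    # Riemann sum over all accumulated data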
def _compute_placement_auc(labels, predictions, weights, alpha,
logit_transformation, is_valid):
"""Computes the AUC and asymptotic normally distributed confidence interval.
The calculations are achieved using the fact that AUC = P(Y_1>Y_0) and the
concept of placement values for each labeled group, as presented by Delong and
Delong (1988). The actual algorithm used is a more computationally efficient
approach presented by Sun and Xu (2014). This could be slow for large batches,
but has the advantage of not having its results degrade depending on the
distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
is_valid: A bool tensor describing whether the input is valid.
Returns:
A 1-D `Tensor` containing the area-under-curve, lower, and upper confidence
interval values.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
AucData = collections_lib.namedtuple('AucData', ['auc', 'lower', 'upper'])
# pylint: enable=invalid-name
# If all the labels are the same or if the number of observations is too
# small, the AUC isn't well-defined.
size = array_ops.size(predictions, out_type=dtypes.int32)
# Count the total number of positive and negative labels in the input.
total_0 = math_ops.reduce_sum(
math_ops.cast(1 - labels, weights.dtype) * weights)
total_1 = math_ops.reduce_sum(
math_ops.cast(labels, weights.dtype) * weights)
# Sort the predictions ascending, as well as
# (i) the corresponding labels and
# (ii) the corresponding weights.
ordered_predictions, indices = nn.top_k(predictions, k=size, sorted=True)
ordered_predictions = array_ops.reverse(
ordered_predictions, axis=array_ops.zeros(1, dtypes.int32))
indices = array_ops.reverse(indices, axis=array_ops.zeros(1, dtypes.int32))
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# We now compute values required for computing placement values.
# We generate a list of indices (segmented_indices) of increasing order. An
# index is assigned for each unique prediction float value. Prediction
# values that are the same share the same index.
_, segmented_indices = array_ops.unique(ordered_predictions)
# We create 2 tensors of weights. weights_for_true is non-zero for true
# labels. weights_for_false is non-zero for false labels.
float_labels_for_true = math_ops.cast(ordered_labels, dtypes.float32)
float_labels_for_false = 1.0 - float_labels_for_true
weights_for_true = ordered_weights * float_labels_for_true
weights_for_false = ordered_weights * float_labels_for_false
# For each set of weights with the same segmented indices, we add up the
# weight values. Note that for each label, we deliberately rely on weights
# for the opposite label.
weight_totals_for_true = math_ops.segment_sum(weights_for_false,
segmented_indices)
weight_totals_for_false = math_ops.segment_sum(weights_for_true,
segmented_indices)
# These cumulative sums of weights importantly exclude the current weight
# sums.
cum_weight_totals_for_true = math_ops.cumsum(weight_totals_for_true,
exclusive=True)
cum_weight_totals_for_false = math_ops.cumsum(weight_totals_for_false,
exclusive=True)
# Compute placement values using the formula. Values with the same segmented
# indices and labels share the same placement values.
placements_for_true = (
(cum_weight_totals_for_true + weight_totals_for_true / 2.0) /
(math_ops.reduce_sum(weight_totals_for_true) + _EPSILON))
placements_for_false = (
(cum_weight_totals_for_false + weight_totals_for_false / 2.0) /
(math_ops.reduce_sum(weight_totals_for_false) + _EPSILON))
# We expand the tensors of placement values (for each label) so that their
# shapes match that of predictions.
placements_for_true = array_ops.gather(placements_for_true, segmented_indices)
placements_for_false = array_ops.gather(placements_for_false,
segmented_indices)
# Select placement values based on the label for each index.
placement_values = (
placements_for_true * float_labels_for_true +
placements_for_false * float_labels_for_false)
# Split placement values by labeled groups.
placement_values_0 = placement_values * math_ops.cast(
1 - ordered_labels, weights.dtype)
weights_0 = ordered_weights * math_ops.cast(
1 - ordered_labels, weights.dtype)
placement_values_1 = placement_values * math_ops.cast(
ordered_labels, weights.dtype)
weights_1 = ordered_weights * math_ops.cast(
ordered_labels, weights.dtype)
# Calculate AUC using placement values
auc_0 = (math_ops.reduce_sum(weights_0 * (1. - placement_values_0)) /
(total_0 + _EPSILON))
auc_1 = (math_ops.reduce_sum(weights_1 * (placement_values_1)) /
(total_1 + _EPSILON))
auc = array_ops.where(math_ops.less(total_0, total_1), auc_1, auc_0)
# Calculate variance and standard error using the placement values.
var_0 = (
math_ops.reduce_sum(
weights_0 * math_ops.square(1. - placement_values_0 - auc_0)) /
(total_0 - 1. + _EPSILON))
var_1 = (
math_ops.reduce_sum(
weights_1 * math_ops.square(placement_values_1 - auc_1)) /
(total_1 - 1. + _EPSILON))
auc_std_err = math_ops.sqrt(
(var_0 / (total_0 + _EPSILON)) + (var_1 / (total_1 + _EPSILON)))
# Calculate asymptotic normal confidence intervals
std_norm_dist = Normal(loc=0., scale=1.)
z_value = std_norm_dist.quantile((1.0 - alpha) / 2.0)
if logit_transformation:
estimate = math_ops.log(auc / (1. - auc + _EPSILON))
std_err = auc_std_err / (auc * (1. - auc + _EPSILON))
transformed_auc_lower = estimate + (z_value * std_err)
transformed_auc_upper = estimate - (z_value * std_err)
def inverse_logit_transformation(x):
exp_negative = math_ops.exp(math_ops.negative(x))
return 1. / (1. + exp_negative + _EPSILON)
auc_lower = inverse_logit_transformation(transformed_auc_lower)
auc_upper = inverse_logit_transformation(transformed_auc_upper)
else:
estimate = auc
std_err = auc_std_err
auc_lower = estimate + (z_value * std_err)
auc_upper = estimate - (z_value * std_err)
## If the estimate is 1 or 0, no variance is present, so the CI collapses to
## the estimate. N.B. this can be misleading, since the number of
## observations may simply be too low.
lower = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_lower)
upper = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_upper)
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
trivial_value = array_ops.constant(0.0)
return AucData(*control_flow_ops.cond(
is_valid, lambda: [auc, lower, upper], lambda: [trivial_value]*3))
def auc_with_confidence_intervals(labels,
predictions,
weights=None,
alpha=0.95,
logit_transformation=True,
metrics_collections=(),
updates_collections=(),
name=None):
"""Computes the AUC and asymptotic normally distributed confidence interval.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC curve and its confidence interval using
placement values. This has the advantage of being resilient to the
distribution of predictions by aggregating across batches, accumulating labels
and predictions and performing the final calculation using all of the
concatenated values.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` and with values of 0 or 1 that are castable to `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
Returns:
auc: A 1-D `Tensor` containing the current area-under-curve, lower, and
upper confidence interval values.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels`, `predictions`, and `weights` have mismatched shapes
or if `alpha` isn't in the range (0,1).
"""
if not 0 < alpha < 1:
raise ValueError('alpha must be between 0 and 1; currently %.02f' % alpha)
if weights is None:
weights = array_ops.ones_like(predictions)
with variable_scope.variable_scope(
name,
default_name='auc_with_confidence_intervals',
values=[labels, predictions, weights]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
total_weight = math_ops.reduce_sum(weights)
weights = array_ops.reshape(weights, [-1])
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(labels,
name='concat_labels')
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op_for_valid_case = control_flow_ops.group(
update_labels, update_preds, update_weights)
# Only perform updates if this case is valid.
all_labels_positive_or_0 = math_ops.logical_and(
math_ops.equal(math_ops.reduce_min(labels), 0),
math_ops.equal(math_ops.reduce_max(labels), 1))
sums_of_weights_at_least_1 = math_ops.greater_equal(total_weight, 1.0)
is_valid = math_ops.logical_and(all_labels_positive_or_0,
sums_of_weights_at_least_1)
update_op = control_flow_ops.cond(
sums_of_weights_at_least_1,
lambda: update_op_for_valid_case, control_flow_ops.no_op)
auc = _compute_placement_auc(
labels_accum,
preds_accum,
weights_accum,
alpha=alpha,
logit_transformation=logit_transformation,
is_valid=is_valid)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
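# A minimal usage sketch (same placeholder conventions as above). The returned
# `auc` is the AucData namedtuple built by _compute_placement_auc, whose
# fields are scalar tensors:
#
#   auc_ci, update_op = auc_with_confidence_intervals(
#       labels=label_t, predictions=pred_t, alpha=0.95)
#   sess.run(tf.local_variables_initializer())
#   for _ in range(num_eval_batches):
#     sess.run(update_op)
#   auc_val, lower, upper = sess.run([auc_ci.auc, auc_ci.lower, auc_ci.upper])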
def precision_recall_at_equal_thresholds(labels,
predictions,
weights=None,
num_thresholds=None,
use_locking=None,
name=None):
"""A helper method for creating metrics related to precision-recall curves.
These values are true positives, false negatives, true negatives, false
positives, precision, and recall. This function returns a data structure that
contains ops within it.
Unlike _streaming_confusion_matrix_at_thresholds (which exhibits O(T * N)
space and run time), this op exhibits O(T + N) space and run time, where T is
the number of thresholds and N is the size of the predictions tensor. Hence,
it may be advantageous to use this function when `predictions` is big.
For instance, prefer this method for per-pixel classification tasks, for which
the predictions tensor may be very large.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding label in `labels`, and counts as a single tp/fp/tn/fn value at
each threshold. This is then multiplied with `weights` which can be used to
reweight certain values, or more commonly used for masking values.
Args:
labels: A bool `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional; If provided, a `Tensor` that has the same dtype as,
and broadcastable to, `predictions`. This tensor is multiplied by counts.
num_thresholds: Optional; Number of thresholds, evenly distributed in
`[0, 1]`. Should be `>= 2`. Defaults to 201. Note that the number of bins
is 1 less than `num_thresholds`. Using an even `num_thresholds` value
instead of an odd one may yield unfriendly edges for bins.
use_locking: Optional; If True, the op will be protected by a lock.
Otherwise, the behavior is undefined, but may exhibit less contention.
Defaults to True.
name: Optional; variable_scope name. If not provided, the string
'precision_recall_at_equal_thresholds' is used.
Returns:
result: A named tuple (See PrecisionRecallData within the implementation of
this function) with properties that are variables of shape
`[num_thresholds]`. The names of the properties are tp, fp, tn, fn,
precision, recall, thresholds. Types are same as that of predictions.
update_op: An op that accumulates values.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
PrecisionRecallData = collections_lib.namedtuple(
'PrecisionRecallData',
['tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'thresholds'])
# pylint: enable=invalid-name
if num_thresholds is None:
num_thresholds = 201
if weights is None:
weights = 1.0
if use_locking is None:
use_locking = True
check_ops.assert_type(labels, dtypes.bool)
with variable_scope.variable_scope(name,
'precision_recall_at_equal_thresholds',
(labels, predictions, weights)):
# Make sure that predictions are within [0.0, 1.0].
with ops.control_dependencies([
check_ops.assert_greater_equal(
predictions,
math_ops.cast(0.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]'),
check_ops.assert_less_equal(
predictions,
math_ops.cast(1.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# It's important we aggregate using float64 since we're accumulating a lot
# of 1.0's for the true/false labels, and accumulating to float32 will
# be quite inaccurate even with just a modest amount of values (~20M).
# We use float64 instead of an integer dtype primarily because the GPU
# scatter kernel only supports floating point types.
agg_dtype = dtypes.float64
f_labels = math_ops.cast(labels, agg_dtype)
weights = math_ops.cast(weights, agg_dtype)
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Flatten predictions and labels.
predictions = array_ops.reshape(predictions, [-1])
true_labels = array_ops.reshape(true_labels, [-1])
false_labels = array_ops.reshape(false_labels, [-1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
# B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
# Given a prediction value p, we can map it to its bucket by
# bucket_index(p) = floor( p * (num_thresholds - 1) )
# so we can use tf.scatter_add() to update the buckets in one pass.
#
# This implementation exhibits a run time and space complexity of O(T + N),
# where T is the number of thresholds and N is the size of predictions.
# Metrics that rely on _streaming_confusion_matrix_at_thresholds instead
# exhibit a complexity of O(T * N).
# Compute the bucket indices for each prediction value.
bucket_indices = math_ops.cast(
math_ops.floor(predictions * (num_thresholds - 1)), dtypes.int32)
with ops.name_scope('variables'):
tp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], agg_dtype, name='tp_buckets')
fp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], agg_dtype, name='fp_buckets')
with ops.name_scope('update_op'):
update_tp = state_ops.scatter_add(
tp_buckets_v, bucket_indices, true_labels, use_locking=use_locking)
update_fp = state_ops.scatter_add(
fp_buckets_v, bucket_indices, false_labels, use_locking=use_locking)
# Set up the cumulative sums to compute the actual metrics.
tp = math_ops.cumsum(tp_buckets_v, reverse=True, name='tp')
fp = math_ops.cumsum(fp_buckets_v, reverse=True, name='fp')
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
# We use a minimum to prevent division by 0.
epsilon = ops.convert_to_tensor(1e-7, dtype=agg_dtype)
precision = tp / math_ops.maximum(epsilon, tp + fp)
recall = tp / math_ops.maximum(epsilon, tp + fn)
# Convert all tensors back to predictions' dtype (as per function contract).
out_dtype = predictions.dtype
_convert = lambda tensor: math_ops.cast(tensor, out_dtype)
result = PrecisionRecallData(
tp=_convert(tp),
fp=_convert(fp),
tn=_convert(tn),
fn=_convert(fn),
precision=_convert(precision),
recall=_convert(recall),
thresholds=_convert(math_ops.lin_space(0.0, 1.0, num_thresholds)))
update_op = control_flow_ops.group(update_tp, update_fp)
return result, update_op
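# Usage sketch (added for illustration, not part of the original module): a
# minimal TF1-style run of `precision_recall_at_equal_thresholds`. The tensor
# values and `num_thresholds` are hypothetical.
#
#   import tensorflow as tf
#   labels = tf.constant([True, False, True, False])
#   predictions = tf.constant([0.9, 0.6, 0.4, 0.1])
#   data, update_op = precision_recall_at_equal_thresholds(
#       labels=labels, predictions=predictions, num_thresholds=5)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     # Each field of `data` has shape [num_thresholds].
#     print(sess.run([data.precision, data.recall, data.thresholds]))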
def streaming_specificity_at_sensitivity(predictions,
labels,
sensitivity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `streaming_specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
sensitivity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
`sensitivity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.specificity_at_sensitivity(
sensitivity=sensitivity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
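# Usage sketch (added for illustration, not part of the original module); the
# companion `streaming_sensitivity_at_specificity` below is driven the same
# way with the roles of the two rates swapped. The tensor values are
# hypothetical.
#
#   import tensorflow as tf
#   predictions = tf.random_uniform([1000], seed=1)
#   labels = tf.greater(tf.random_uniform([1000], seed=2), 0.5)
#   specificity, update_op = streaming_specificity_at_sensitivity(
#       predictions, labels, sensitivity=0.9)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(specificity))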
def streaming_sensitivity_at_specificity(predictions,
labels,
specificity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the sensitivity at a given specificity.
The `streaming_sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
specificity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.sensitivity_at_specificity(
specificity=specificity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.precision_at_thresholds. Note that '
'the order of the labels and predictions arguments has been switched.')
def streaming_precision_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `streaming_precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
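# Usage sketch (added for illustration, not part of the original module):
# precision at a few explicit thresholds; `streaming_recall_at_thresholds`
# below follows the same pattern. The tensor values are hypothetical.
#
#   import tensorflow as tf
#   predictions = tf.constant([0.2, 0.6, 0.8, 0.9])
#   labels = tf.constant([False, True, False, True])
#   precision, update_op = streaming_precision_at_thresholds(
#       predictions, labels, thresholds=[0.25, 0.5, 0.75])
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(precision))  # One precision value per threshold.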
@deprecated(None,
'Please switch to tf.metrics.recall_at_thresholds. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_recall_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `streaming_recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fpr values for different `thresholds` on `predictions`.
The `streaming_false_positive_rate_at_thresholds` function creates two
local variables, `false_positives` and `true_negatives`, for various values of
thresholds. `false_positive_rate[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `False`, divided by the total weight of `False` values in `labels`
(`false_positives[i] / (false_positives[i] + true_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_positives` and
`true_negatives` variables that are used in the computation of
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fp', 'tn'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fpr(fp, tn, name):
return math_ops.div(fp, epsilon + fp + tn, name='fpr_' + name)
fpr = compute_fpr(values['fp'], values['tn'], 'value')
update_op = compute_fpr(update_ops['fp'], update_ops['tn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
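# Usage sketch (added for illustration, not part of the original module); the
# companion `streaming_false_negative_rate_at_thresholds` below is used
# identically. The tensor values are hypothetical.
#
#   import tensorflow as tf
#   predictions = tf.constant([0.1, 0.7, 0.8, 0.3])
#   labels = tf.constant([False, False, True, True])
#   fpr, update_op = streaming_false_positive_rate_at_thresholds(
#       predictions, labels, thresholds=[0.5])
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(fpr))  # 1 false positive out of 2 negatives -> [0.5].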
def streaming_false_negative_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fnr values for different `thresholds` on `predictions`.
The `streaming_false_negative_rate_at_thresholds` function creates two
local variables, `false_negatives` and `true_positives`, for various values of
thresholds. `false_negative_rate[i]` is defined as the total weight of values
in `predictions` at or below `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`false_negatives[i] / (false_negatives[i] + true_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_negative_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_negatives` and
`true_positives` variables that are used in the computation of
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fn', 'tp'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fnr(fn, tp, name):
return math_ops.div(fn, epsilon + fn + tp, name='fnr_' + name)
fnr = compute_fnr(values['fn'], values['tp'], 'value')
update_op = compute_fnr(update_ops['fn'], update_ops['tp'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
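# Illustration (added comment): `_at_k_name` only builds metric/scope names,
# e.g. _at_k_name('recall', 5) -> 'recall_at_5' and
# _at_k_name('precision', class_id=2) -> 'precision_at_k_class2'.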
@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
'and reshape labels from [batch_size] to [batch_size, 1].')
def streaming_recall_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall@k of the predictions with respect to dense labels.
The `streaming_recall_at_k` function creates two local variables, `total` and
`count`, that are used to compute the recall@k frequency. This frequency is
ultimately returned as `recall_at_<k>`: an idempotent operation that simply
divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, an `in_top_k` operation computes a `Tensor` with
shape [batch_size] whose elements indicate whether or not the corresponding
label is in the top `k` `predictions`. Then `update_op` increments `total`
with the reduced sum of `weights` where `in_top_k` is `True`, and it
increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A float `Tensor` of dimension [batch_size, num_classes].
labels: A `Tensor` of dimension [batch_size] whose type is in `int32`,
`int64`.
k: The number of top elements to look at for computing recall.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall_at_k`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
recall_at_k: A `Tensor` representing the recall@k, the fraction of labels
which fall into the top `k` predictions.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `recall_at_k`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))
return streaming_mean(in_top_k, weights, metrics_collections,
updates_collections, name or _at_k_name('recall', k))
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_recall_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we'll calculate recall as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
actual positives (the full `labels` row).
If `class_id` is specified, we calculate recall by considering only the rows
in the batch for which `class_id` is in `labels`, and computing the
fraction of them for which `class_id` is in the corresponding row in
`labels`.
`streaming_sparse_recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
Values should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.recall_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
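# Usage sketch (added for illustration, not part of the original module):
# recall@1 with dense class logits and sparse integer labels;
# `streaming_sparse_precision_at_k` below is called the same way. The tensor
# values are hypothetical.
#
#   import tensorflow as tf
#   predictions = tf.constant([[0.1, 0.6, 0.3],
#                              [0.8, 0.1, 0.1]])
#   labels = tf.constant([[1], [2]], dtype=tf.int64)
#   recall, update_op = streaming_sparse_recall_at_k(predictions, labels, k=1)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(recall))  # 0.5: only the first row's label is in its top-1.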
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
positives (all top `k` `predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.precision_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_top_k(top_k_predictions,
labels,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of top-k predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of
true positives (i.e., correct predictions, items in `top_k_predictions`
that are found in the corresponding row in `labels`) to positives (all
`top_k_predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_top_k` creates two local variables,
`true_positive_at_k` and `false_positive_at_k`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_k`: an idempotent operation that simply divides
`true_positive_at_k` by total (`true_positive_at_k` + `false_positive_at_k`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_k`. Internally, set operations applied to `top_k_predictions`
and `labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_k` and
`false_positive_at_k` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
ValueError: If `top_k_predictions` has rank < 2.
"""
default_name = _at_k_name('precision', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.precision_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
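# Usage sketch (added for illustration, not part of the original module):
# precision when the top-k class indices have already been computed. The
# tensor values are hypothetical.
#
#   import tensorflow as tf
#   top_k_predictions = tf.constant([[1, 2], [0, 2]], dtype=tf.int64)
#   labels = tf.constant([[1], [1]], dtype=tf.int64)
#   precision, update_op = streaming_sparse_precision_at_top_k(
#       top_k_predictions, labels)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(precision))  # 1 correct out of 4 predictions -> 0.25.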
def sparse_recall_at_top_k(labels,
top_k_predictions,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of top-k predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
`sparse_recall_at_top_k` creates two local variables, `true_positive_at_<k>`
and `false_negative_at_<k>`, that are used to compute the recall_at_k
frequency. This frequency is ultimately returned as `recall_at_<k>`: an
idempotent operation that simply divides `true_positive_at_<k>` by total
(`true_positive_at_<k>` + `false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Set operations applied to `top_k` and `labels` calculate the
true positives and false negatives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_negative_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range always count towards `false_negative_at_<k>`.
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
default_name = _at_k_name('recall', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.recall_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def _compute_recall_at_precision(tp, fp, fn, precision, name,
strict_mode=False):
"""Helper function to compute recall at a given `precision`.
Args:
tp: The number of true positives.
fp: The number of false positives.
fn: The number of false negatives.
precision: The precision for which the recall will be calculated.
name: An optional variable_scope name.
strict_mode: If true and there exists a threshold where the precision is
no smaller than the target precision, return the corresponding recall at
the threshold. Otherwise, return 0. If false, find the threshold where the
precision is closest to the target precision and return the recall at the
threshold.
Returns:
The recall at a given `precision`.
"""
precisions = math_ops.div(tp, tp + fp + _EPSILON)
if not strict_mode:
tf_index = math_ops.argmin(
math_ops.abs(precisions - precision), 0, output_type=dtypes.int32)
# Now, we have the implicit threshold, so compute the recall:
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
else:
# We aim to find the threshold where the precision is minimum but no smaller
# than the target precision.
# The rationale:
# 1. Compute the difference between precisions (by different thresholds) and
# the target precision.
# 2. Take the reciprocal of the values by the above step. The intention is
# to make the positive values rank before negative values and also the
# smaller positives rank before larger positives.
tf_index = math_ops.argmax(
math_ops.div(1.0, precisions - precision + _EPSILON),
0,
output_type=dtypes.int32)
def _return_good_recall():
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
return control_flow_ops.cond(precisions[tf_index] >= precision,
_return_good_recall, lambda: 0.0)
def recall_at_precision(labels,
predictions,
precision,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None,
strict_mode=False):
"""Computes `recall` at `precision`.
The `recall_at_precision` function creates three local variables,
`tp` (true positives), `fp` (false positives) and `fn` (false negatives)
that are used to compute the `recall` at the given `precision` value. The
threshold for the given `precision` value is computed and used to evaluate the
corresponding `recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall`. `update_op` increments the `tp`, `fp` and `fn` counts with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
precision: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
`precision`.
metrics_collections: An optional list of collections that `recall`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
strict_mode: If true and there exists a threshold where the precision is
no smaller than the target precision, return the corresponding recall at the
threshold. Otherwise, return 0. If false, find the threshold where the
precision is closest to the target precision and return the recall at the
threshold.
Returns:
recall: A scalar `Tensor` representing the recall at the given
`precision` value.
update_op: An operation that increments the `tp`, `fp` and `fn`
variables appropriately and whose value matches `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`precision` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if not 0 <= precision <= 1:
raise ValueError('`precision` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'recall_at_precision',
(predictions, labels, weights)):
thresholds = [
i * 1.0 / (num_thresholds - 1) for i in range(1, num_thresholds - 1)
]
thresholds = [0.0 - _EPSILON] + thresholds + [1.0 + _EPSILON]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
recall = _compute_recall_at_precision(values['tp'], values['fp'],
values['fn'], precision, 'value',
strict_mode)
update_op = _compute_recall_at_precision(update_ops['tp'], update_ops['fp'],
update_ops['fn'], precision,
'update_op', strict_mode)
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return recall, update_op
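# Usage sketch (added for illustration, not part of the original module):
# recall at a precision target of 0.75. The tensor values and the target are
# hypothetical.
#
#   import tensorflow as tf
#   labels = tf.constant([True, False, True, True, False])
#   predictions = tf.constant([0.9, 0.8, 0.7, 0.4, 0.2])
#   recall, update_op = recall_at_precision(labels, predictions, precision=0.75)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(recall))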
def precision_at_recall(labels,
predictions,
target_recall,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision at a given recall.
This function creates variables to track the true positives, false positives,
true negatives, and false negatives at a set of thresholds. Among those
thresholds where recall is at least `target_recall`, precision is computed
at the threshold where recall is closest to `target_recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
precision at `target_recall`. `update_op` increments the counts of true
positives, false positives, true negatives, and false negatives with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about precision and recall, see
http://en.wikipedia.org/wiki/Precision_and_recall
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
target_recall: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
recall.
metrics_collections: An optional list of collections to which `precision`
should be added.
updates_collections: An optional list of collections to which `update_op`
should be added.
name: An optional variable_scope name.
Returns:
precision: A scalar `Tensor` representing the precision at the given
`target_recall` value.
update_op: An operation that increments the variables for tracking the
true positives, false positives, true negatives, and false negatives and
whose value matches `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`target_recall` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision_at_recall is not '
'supported when eager execution is enabled.')
if target_recall < 0 or target_recall > 1:
raise ValueError('`target_recall` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'precision_at_recall',
(predictions, labels, weights)):
kepsilon = 1e-7 # Used to avoid division by zero.
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
def compute_precision_at_recall(tp, fp, fn, name):
"""Computes the precision at a given recall.
Args:
tp: True positives.
fp: False positives.
fn: False negatives.
name: A name for the operation.
Returns:
The precision at the desired recall.
"""
recalls = math_ops.div(tp, tp + fn + kepsilon)
# Because recall is monotone decreasing as a function of the threshold,
# the smallest recall exceeding target_recall occurs at the largest
# threshold where recall >= target_recall.
admissible_recalls = math_ops.cast(
math_ops.greater_equal(recalls, target_recall), dtypes.int64)
tf_index = math_ops.reduce_sum(admissible_recalls) - 1
# Now we have the threshold at which to compute precision:
return math_ops.div(tp[tf_index] + kepsilon,
tp[tf_index] + fp[tf_index] + kepsilon,
name)
precision_value = compute_precision_at_recall(
values['tp'], values['fp'], values['fn'], 'value')
update_op = compute_precision_at_recall(
update_ops['tp'], update_ops['fp'], update_ops['fn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, precision_value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return precision_value, update_op
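# Usage sketch (added for illustration, not part of the original module):
# precision at a recall target of 0.9, under graph (non-eager) execution. The
# tensor values and the target are hypothetical.
#
#   import tensorflow as tf
#   labels = tf.constant([True, False, True, True, False])
#   predictions = tf.constant([0.9, 0.8, 0.7, 0.4, 0.2])
#   precision, update_op = precision_at_recall(
#       labels, predictions, target_recall=0.9)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(precision))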
def streaming_sparse_average_precision_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
See `sparse_average_precision_at_k` for details on the formula. `weights` are
applied to the result of `sparse_average_precision_at_k`.
`streaming_sparse_average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
return metrics.average_precision_at_k(
k=k,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_sparse_average_precision_at_top_k(top_k_predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`streaming_sparse_average_precision_at_top_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
the true positives and false positives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
dimension must be set and contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`. Values should be in range
[0, num_classes).
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `top_k_predictions`.
Values should be in range [0, num_classes).
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
Raises:
ValueError: if the last dimension of top_k_predictions is not set.
"""
return metrics_impl._streaming_sparse_average_precision_at_top_k( # pylint: disable=protected-access
predictions_idx=top_k_predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
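# Usage sketch (added for illustration, not part of the original module):
# mean average precision from precomputed top-k class indices. The tensor
# values are hypothetical.
#
#   import tensorflow as tf
#   top_k_predictions = tf.constant([[1, 0], [0, 2]], dtype=tf.int64)
#   labels = tf.constant([[1], [2]], dtype=tf.int64)
#   mean_ap, update_op = streaming_sparse_average_precision_at_top_k(
#       top_k_predictions, labels)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(mean_ap))  # Per-row AP of 1.0 and 0.5 -> mean 0.75.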
@deprecated(None,
'Please switch to tf.metrics.mean_absolute_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_absolute_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `streaming_mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_absolute_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
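# --- Editor's sketch (not part of the original module): a minimal NumPy model
# of the streaming total/count bookkeeping described in the docstring above.
# `total` accumulates weighted absolute errors, `count` accumulates weights,
# and the metric value is total / count. The batches below are made up purely
# for illustration; the same pattern underlies the MSE and RMSE wrappers below.
import numpy as np

def _sketch_streaming_mae(batches):
  total, count = 0.0, 0.0
  for predictions, labels, weights in batches:
    abs_errors = np.abs(predictions - labels)
    total += np.sum(weights * abs_errors)   # update_op: total += sum(w * |p - l|)
    count += np.sum(weights)                # update_op: count += sum(w)
  return total / count                      # idempotent value: total / count

# Two batches with unit weights reproduce the plain (unweighted) MAE.
_mae_batches = [(np.array([1., 2.]), np.array([1.5, 2.5]), np.ones(2)),
                (np.array([3.]), np.array([2.]), np.ones(1))]
assert np.isclose(_sketch_streaming_mae(_mae_batches), (0.5 + 0.5 + 1.0) / 3)
# --- end sketch ---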
def streaming_mean_relative_error(predictions,
labels,
normalizer,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `streaming_mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_relative_error(
normalizer=normalizer,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `streaming_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(
None,
'Please switch to tf.metrics.root_mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_root_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `streaming_root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.root_mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_covariance(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the unbiased sample covariance between `predictions` and `labels`.
The `streaming_covariance` function creates four local variables,
`comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to
compute the sample covariance between predictions and labels across multiple
batches of data. The covariance is ultimately returned as an idempotent
operation that simply divides `comoment` by `count` - 1. We use `count` - 1
in order to get an unbiased estimate.
The algorithm used for this online computation is described in
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance.
Specifically, the formula used to combine two sample comoments is
`C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`
The comoment for a single batch of data is simply
`sum((x - E[x]) * (y - E[y]))`, optionally weighted.
If `weights` is not None, then it is used to compute weighted comoments,
means, and count. NOTE: these weights are treated as "frequency weights", as
opposed to "reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
To facilitate the computation of covariance across multiple batches of data,
the function creates an `update_op` operation, which updates underlying
variables and returns the updated covariance.
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
covariance: A `Tensor` representing the current unbiased sample covariance,
`comoment` / (`count` - 1).
update_op: An operation that updates the local variables appropriately.
Raises:
ValueError: If labels and predictions are of different sizes or if either
`metrics_collections` or `updates_collections` are not a list or tuple.
"""
with variable_scope.variable_scope(name, 'covariance',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
mean_prediction = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_prediction')
mean_label = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_label')
comoment = metrics_impl.metric_variable( # C_A in update equation
[], dtypes.float32, name='comoment')
if weights is None:
batch_count = math_ops.to_float(array_ops.size(labels)) # n_B in eqn
weighted_predictions = predictions
weighted_labels = labels
else:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
batch_count = math_ops.reduce_sum(weights) # n_B in eqn
weighted_predictions = math_ops.multiply(predictions, weights)
weighted_labels = math_ops.multiply(labels, weights)
update_count = state_ops.assign_add(count_, batch_count) # n_AB in eqn
prev_count = update_count - batch_count # n_A in update equation
# We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)
# batch_mean_prediction is E[x_B] in the update equation
batch_mean_prediction = _safe_div(
math_ops.reduce_sum(weighted_predictions),
batch_count)
delta_mean_prediction = _safe_div(
(batch_mean_prediction - mean_prediction) * batch_count,
update_count)
update_mean_prediction = state_ops.assign_add(mean_prediction,
delta_mean_prediction)
# prev_mean_prediction is E[x_A] in the update equation
prev_mean_prediction = update_mean_prediction - delta_mean_prediction
# batch_mean_label is E[y_B] in the update equation
batch_mean_label = _safe_div(
math_ops.reduce_sum(weighted_labels),
batch_count)
delta_mean_label = _safe_div(
(batch_mean_label - mean_label) * batch_count,
update_count)
update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
# prev_mean_label is E[y_A] in the update equation
prev_mean_label = update_mean_label - delta_mean_label
unweighted_batch_coresiduals = ((predictions - batch_mean_prediction) *
(labels - batch_mean_label))
# batch_comoment is C_B in the update equation
if weights is None:
batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
else:
batch_comoment = math_ops.reduce_sum(
unweighted_batch_coresiduals * weights)
# View delta_comoment as = C_AB - C_A in the update equation above.
# Since C_A is stored in a var, by how much do we need to increment that var
# to make the var = C_AB?
delta_comoment = (
batch_comoment + (prev_mean_prediction - batch_mean_prediction) *
(prev_mean_label - batch_mean_label) *
(prev_count * batch_count / update_count))
update_comoment = state_ops.assign_add(comoment, delta_comoment)
covariance = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='covariance')
with ops.control_dependencies([update_comoment]):
update_op = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, covariance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return covariance, update_op
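# --- Editor's sketch (not part of the original module): a NumPy check of the
# comoment combination formula quoted in the docstring above,
#   C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB.
# The random data is only for demonstration and no weights are used.
import numpy as np

def _sketch_comoment(x, y):
  return np.sum((x - x.mean()) * (y - y.mean()))

_cov_rng = np.random.RandomState(0)
_xa, _ya = _cov_rng.randn(7), _cov_rng.randn(7)
_xb, _yb = _cov_rng.randn(5), _cov_rng.randn(5)
_na, _nb = len(_xa), len(_xb)
_c_ab = (_sketch_comoment(_xa, _ya) + _sketch_comoment(_xb, _yb) +
         (_xa.mean() - _xb.mean()) * (_ya.mean() - _yb.mean()) *
         _na * _nb / float(_na + _nb))
assert np.isclose(_c_ab, _sketch_comoment(np.concatenate([_xa, _xb]),
                                          np.concatenate([_ya, _yb])))
# The unbiased sample covariance returned above is then C_AB / (n_AB - 1).
# --- end sketch ---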
def streaming_pearson_correlation(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes Pearson correlation coefficient between `predictions`, `labels`.
The `streaming_pearson_correlation` function delegates to
`streaming_covariance` the tracking of three [co]variances:
- `streaming_covariance(predictions, labels)`, i.e. covariance
- `streaming_covariance(predictions, predictions)`, i.e. variance
- `streaming_covariance(labels, labels)`, i.e. variance
The product-moment correlation ultimately returned is an idempotent operation
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. To
facilitate correlation computation across multiple batches, the function
groups the `update_op`s of the underlying streaming_covariance and returns an
`update_op`.
If `weights` is not None, then it is used to compute a weighted correlation.
NOTE: these weights are treated as "frequency weights", as opposed to
"reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as predictions.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
pearson_r: A `Tensor` representing the current Pearson product-moment
correlation coefficient, the value of
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`.
update_op: An operation that updates the underlying variables appropriately.
Raises:
ValueError: If `labels` and `predictions` are of different sizes, or if
`weights` is the wrong size, or if either `metrics_collections` or
`updates_collections` are not a `list` or `tuple`.
"""
with variable_scope.variable_scope(name, 'pearson_r',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# Broadcast weights here to avoid duplicate broadcasting in each call to
# `streaming_covariance`.
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
cov, update_cov = streaming_covariance(
predictions, labels, weights=weights, name='covariance')
var_predictions, update_var_predictions = streaming_covariance(
predictions, predictions, weights=weights, name='variance_predictions')
var_labels, update_var_labels = streaming_covariance(
labels, labels, weights=weights, name='variance_labels')
pearson_r = math_ops.truediv(
cov,
math_ops.multiply(
math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
name='pearson_r')
update_op = math_ops.truediv(
update_cov,
math_ops.multiply(
math_ops.sqrt(update_var_predictions),
math_ops.sqrt(update_var_labels)),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, pearson_r)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return pearson_r, update_op
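# --- Editor's sketch (not part of the original module): the ratio described
# above, cov(p, l) / sqrt(var(p) * var(l)), is the ordinary Pearson
# correlation. A NumPy check on synthetic, unweighted data:
import numpy as np

_r_rng = np.random.RandomState(1)
_p = _r_rng.randn(100)
_l = 0.5 * _p + 0.1 * _r_rng.randn(100)
_r = (np.cov(_p, _l, ddof=1)[0, 1] /
      np.sqrt(np.var(_p, ddof=1) * np.var(_l, ddof=1)))
assert np.isclose(_r, np.corrcoef(_p, _l)[0, 1])
# --- end sketch ---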
# TODO(nsilberman): add a 'normalized' flag so that the user can request
# normalization if the inputs are not normalized.
def streaming_mean_cosine_distance(predictions,
labels,
dim,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `streaming_mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of the same shape as `labels`.
labels: A `Tensor` of arbitrary shape.
dim: The dimension along which the cosine distance is computed.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
and whose dimension `dim` is 1.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(
radial_diffs, reduction_indices=[
dim,
], keepdims=True)
mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,
name or 'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
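# --- Editor's sketch (not part of the original module): the op above computes
# 1 - sum(predictions * labels) along `dim`, which equals the cosine distance
# only when both inputs are already unit-normalized along that dimension (see
# the normalization TODO above). NumPy check on synthetic, normalized rows:
import numpy as np

_cd_rng = np.random.RandomState(2)
_cp = _cd_rng.randn(4, 3)
_cl = _cd_rng.randn(4, 3)
_cp /= np.linalg.norm(_cp, axis=1, keepdims=True)
_cl /= np.linalg.norm(_cl, axis=1, keepdims=True)
_per_row = 1.0 - np.sum(_cp * _cl, axis=1)      # radial_diffs reduced over dim
_mean_distance = _per_row.mean()                # unweighted streaming mean
assert np.allclose(_per_row,
                   [1.0 - np.dot(_cp[i], _cl[i]) for i in range(4)])
# --- end sketch ---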
def streaming_percentage_less(values,
threshold,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `streaming_percentage_less` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.percentage_below(
values=values,
threshold=threshold,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
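# --- Editor's sketch (not part of the original module): with unit weights the
# streaming percentage reduces to a plain fraction of values below threshold.
# The sample values are made up.
import numpy as np
assert np.isclose(np.mean(np.array([1., 2., 3., 4.]) < 2.5), 0.5)
# --- end sketch ---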
def streaming_mean_iou(predictions,
labels,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened, if its rank > 1.
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened, if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_iou(
num_classes=num_classes,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
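# --- Editor's sketch (not part of the original module): mean IoU computed from
# an accumulated confusion matrix, following the definition quoted above:
#   IoU_c = TP_c / (TP_c + FP_c + FN_c) = cm[c, c] / (row_c + col_c - cm[c, c]).
# The confusion matrix below is made up for illustration.
import numpy as np

_iou_cm = np.array([[10., 2., 0.],
                    [ 1., 7., 1.],
                    [ 0., 3., 6.]])     # rows: true labels, cols: predictions
_tp = np.diag(_iou_cm)
_denom = _iou_cm.sum(axis=0) + _iou_cm.sum(axis=1) - _tp   # TP + FP + FN
_mean_iou = np.mean(_tp / _denom)
assert 0.0 <= _mean_iou <= 1.0
# --- end sketch ---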
def _next_array_size(required_size, growth_factor=1.5):
"""Calculate the next size for reallocating a dynamic array.
Args:
required_size: number or tf.Tensor specifying required array capacity.
growth_factor: optional number or tf.Tensor specifying the growth factor
between subsequent allocations.
Returns:
tf.Tensor with dtype=int32 giving the next array size.
"""
exponent = math_ops.ceil(
math_ops.log(math_ops.cast(required_size, dtypes.float32)) / math_ops.log(
math_ops.cast(growth_factor, dtypes.float32)))
return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)
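# --- Editor's sketch (not part of the original module): a plain-Python version
# of the sizing rule above. The next capacity is (roughly) the smallest integer
# power of `growth_factor` that is at least `required_size`, so repeated
# appends trigger only O(log n) reallocations.
import math

def _sketch_next_array_size(required_size, growth_factor=1.5):
  exponent = math.ceil(math.log(required_size) / math.log(growth_factor))
  return int(math.ceil(growth_factor ** exponent))

assert _sketch_next_array_size(1) == 1
assert _sketch_next_array_size(10) >= 10   # 1.5**6 ~= 11.39 -> 12
# --- end sketch ---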
def streaming_concat(values,
axis=0,
max_size=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Concatenate values along an axis across batches.
The function `streaming_concat` creates two local variables, `array` and
`size`, that are used to store concatenated values. Internally, `array` is
  used as storage for a dynamic array (if `max_size` is `None`), which ensures
that updates can be run in amortized constant time.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that appends the values of a tensor and returns the
length of the concatenated axis.
This op allows for evaluating metrics that cannot be updated incrementally
using the same framework as other streaming metrics.
Args:
values: `Tensor` to concatenate. Rank and the shape along all axes other
than the axis to concatenate along must be statically known.
axis: optional integer axis to concatenate along.
max_size: optional integer maximum size of `value` along the given axis.
Once the maximum size is reached, further updates are no-ops. By default,
there is no maximum size: the array is resized as necessary.
metrics_collections: An optional list of collections that `value`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
value: A `Tensor` representing the concatenated values.
update_op: An operation that concatenates the next values.
Raises:
ValueError: if `values` does not have a statically known rank, `axis` is
not in the valid range or the size of `values` is not statically known
along any axis other than `axis`.
"""
with variable_scope.variable_scope(name, 'streaming_concat', (values,)):
# pylint: disable=invalid-slice-index
values_shape = values.get_shape()
if values_shape.dims is None:
      raise ValueError('`values` must have a statically known rank')
ndim = len(values_shape)
if axis < 0:
axis += ndim
if not 0 <= axis < ndim:
raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))
fixed_shape = [dim.value for n, dim in enumerate(values_shape) if n != axis]
if any(value is None for value in fixed_shape):
raise ValueError('all dimensions of `values` other than the dimension to '
'concatenate along must have statically known size')
# We move `axis` to the front of the internal array so assign ops can be
# applied to contiguous slices
init_size = 0 if max_size is None else max_size
init_shape = [init_size] + fixed_shape
array = metrics_impl.metric_variable(
init_shape, values.dtype, validate_shape=False, name='array')
size = metrics_impl.metric_variable([], dtypes.int32, name='size')
perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
valid_array = array[:size]
valid_array.set_shape([None] + fixed_shape)
value = array_ops.transpose(valid_array, perm, name='concat')
values_size = array_ops.shape(values)[axis]
if max_size is None:
batch_size = values_size
else:
batch_size = math_ops.minimum(values_size, max_size - size)
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
def reallocate():
next_size = _next_array_size(new_size)
next_shape = array_ops.stack([next_size] + fixed_shape)
new_value = array_ops.zeros(next_shape, dtype=values.dtype)
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
return control_flow_ops.no_op()
new_size = size + batch_size
array_size = array_ops.shape_internal(array, optimize=False)[0]
maybe_reallocate_op = control_flow_ops.cond(
new_size > array_size, reallocate, control_flow_ops.no_op)
with ops.control_dependencies([maybe_reallocate_op]):
append_values_op = array[size:new_size].assign(batch_values)
with ops.control_dependencies([append_values_op]):
update_op = size.assign(new_size)
if metrics_collections:
ops.add_to_collections(metrics_collections, value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value, update_op
# pylint: enable=invalid-slice-index
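# --- Editor's sketch (not part of the original module): a NumPy model of the
# dynamic-array bookkeeping used above. A backing `array` plus a `size` counter
# are kept; whenever an appended batch does not fit, the backing array is grown
# geometrically, so the amortized cost per appended row stays constant.
import numpy as np

class _SketchStreamingConcat(object):
  def __init__(self, fixed_shape, dtype=np.float64):
    self._array = np.zeros((0,) + tuple(fixed_shape), dtype=dtype)
    self._size = 0

  def update(self, batch):                       # analogue of update_op
    new_size = self._size + len(batch)
    if new_size > len(self._array):              # analogue of maybe_reallocate_op
      next_size = max(2 * len(self._array), new_size)
      grown = np.zeros((next_size,) + self._array.shape[1:], self._array.dtype)
      grown[:self._size] = self._array[:self._size]
      self._array = grown
    self._array[self._size:new_size] = batch     # analogue of append_values_op
    self._size = new_size                        # analogue of size.assign(new_size)

  @property
  def value(self):                               # analogue of the `value` tensor
    return self._array[:self._size]

_sc = _SketchStreamingConcat(fixed_shape=(2,))
_sc.update(np.ones((3, 2)))
_sc.update(2 * np.ones((4, 2)))
assert _sc.value.shape == (7, 2)
# --- end sketch ---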
def aggregate_metrics(*value_update_tuples):
"""Aggregates the metric value tensors and update ops into two lists.
Args:
*value_update_tuples: a variable number of tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A list of value `Tensor` objects and a list of update ops.
Raises:
ValueError: if `value_update_tuples` is empty.
"""
if not value_update_tuples:
raise ValueError('Expected at least one value_tensor/update_op pair')
value_ops, update_ops = zip(*value_update_tuples)
return list(value_ops), list(update_ops)
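# --- Editor's sketch (not part of the original module): zip(*pairs) is all the
# helper above does; it splits (value, update_op) pairs into two parallel
# lists. Demonstrated with placeholder strings rather than real ops:
_pairs = [('mae_value', 'mae_update'), ('rmse_value', 'rmse_update')]
_values, _updates = zip(*_pairs)
assert list(_values) == ['mae_value', 'rmse_value']
assert list(_updates) == ['mae_update', 'rmse_update']
# --- end sketch ---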
def aggregate_metric_map(names_to_tuples):
"""Aggregates the metric names to tuple dictionary.
This function is useful for pairing metric names with their associated value
and update ops when the list of metrics is long. For example:
```python
metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
'Mean Absolute Error': new_slim.metrics.streaming_mean_absolute_error(
predictions, labels, weights),
'Mean Relative Error': new_slim.metrics.streaming_mean_relative_error(
predictions, labels, labels, weights),
'RMSE Linear': new_slim.metrics.streaming_root_mean_squared_error(
predictions, labels, weights),
'RMSE Log': new_slim.metrics.streaming_root_mean_squared_error(
predictions, labels, weights),
})
```
Args:
names_to_tuples: a map of metric names to tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A dictionary from metric names to value ops and a dictionary from metric
names to update ops.
"""
metric_names = names_to_tuples.keys()
value_ops, update_ops = zip(*names_to_tuples.values())
return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))
def count(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the number of examples, or sum of `weights`.
This metric keeps track of the denominator in `tf.metrics.mean`.
When evaluating some metric (e.g. mean) on one or more subsets of the data,
this auxiliary metric is useful for keeping track of how many examples there
are in each subset.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
    values: A `Tensor` of arbitrary dimensions. Only its shape is used.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values`
      dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
count: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the metric from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.contrib.metrics.count is not supported when eager '
'execution is enabled.')
with variable_scope.variable_scope(name, 'count', (values, weights)):
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
values = math_ops.to_float(values)
values, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values,
labels=None,
weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
num_values = math_ops.reduce_sum(weights)
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count_, num_values)
count_ = metrics_impl._aggregate_variable(count_, metrics_collections) # pylint: disable=protected-access
if updates_collections:
ops.add_to_collections(updates_collections, update_count_op)
return count_, update_count_op
def cohen_kappa(labels,
predictions_idx,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates Cohen's kappa.
[Cohen's kappa](https://en.wikipedia.org/wiki/Cohen's_kappa) is a statistic
that measures inter-annotator agreement.
The `cohen_kappa` function calculates the confusion matrix, and creates three
local variables to compute the Cohen's kappa: `po`, `pe_row`, and `pe_col`,
which refer to the diagonal part, rows and columns totals of the confusion
matrix, respectively. This value is ultimately returned as `kappa`, an
idempotent operation that is calculated by
pe = (pe_row * pe_col) / N
k = (sum(po) - sum(pe)) / (N - sum(pe))
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`kappa`. `update_op` weights each prediction by the corresponding value in
`weights`.
Class labels are expected to start at 0. E.g., if `num_classes`
was three, then the possible labels would be [0, 1, 2].
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  NOTE: Equivalent to `sklearn.metrics.cohen_kappa_score`, but that method
  does not support a weighted confusion matrix yet.
Args:
labels: 1-D `Tensor` of real labels for the classification task. Must be
one of the following types: int16, int32, int64.
predictions_idx: 1-D `Tensor` of predicted class indices for a given
classification. Must have the same type as `labels`.
num_classes: The possible number of labels.
weights: Optional `Tensor` whose shape matches `predictions`.
metrics_collections: An optional list of collections that `kappa` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
kappa: Scalar float `Tensor` representing the current Cohen's kappa.
update_op: `Operation` that increments `po`, `pe_row` and `pe_col`
variables appropriately and whose value matches `kappa`.
Raises:
ValueError: If `num_classes` is less than 2, or `predictions` and `labels`
have mismatched shapes, or if `weights` is not `None` and its shape
doesn't match `predictions`, or if either `metrics_collections` or
`updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.contrib.metrics.cohen_kappa is not supported '
'when eager execution is enabled.')
if num_classes < 2:
    raise ValueError('`num_classes` must be >= 2. '
'Found: {}'.format(num_classes))
with variable_scope.variable_scope(name, 'cohen_kappa',
(labels, predictions_idx, weights)):
# Convert 2-dim (num, 1) to 1-dim (num,)
labels.get_shape().with_rank_at_most(2)
if labels.get_shape().ndims == 2:
labels = array_ops.squeeze(labels, axis=[-1])
predictions_idx, labels, weights = (
metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions_idx,
labels=labels,
weights=weights))
predictions_idx.get_shape().assert_is_compatible_with(labels.get_shape())
stat_dtype = (
dtypes.int64
if weights is None or weights.dtype.is_integer else dtypes.float32)
po = metrics_impl.metric_variable((num_classes,), stat_dtype, name='po')
pe_row = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_row')
pe_col = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_col')
# Table of the counts of agreement:
counts_in_table = confusion_matrix.confusion_matrix(
labels,
predictions_idx,
num_classes=num_classes,
weights=weights,
dtype=stat_dtype,
name='counts_in_table')
po_t = array_ops.diag_part(counts_in_table)
pe_row_t = math_ops.reduce_sum(counts_in_table, axis=0)
pe_col_t = math_ops.reduce_sum(counts_in_table, axis=1)
update_po = state_ops.assign_add(po, po_t)
update_pe_row = state_ops.assign_add(pe_row, pe_row_t)
update_pe_col = state_ops.assign_add(pe_col, pe_col_t)
def _calculate_k(po, pe_row, pe_col, name):
po_sum = math_ops.reduce_sum(po)
total = math_ops.reduce_sum(pe_row)
pe_sum = math_ops.reduce_sum(
_safe_div(
math_ops.to_double(pe_row * pe_col),
math_ops.to_double(total)))
po_sum, pe_sum, total = (math_ops.to_double(po_sum),
math_ops.to_double(pe_sum),
math_ops.to_double(total))
# kappa = (po - pe) / (N - pe)
k = metrics_impl._safe_scalar_div( # pylint: disable=protected-access
po_sum - pe_sum,
total - pe_sum,
name=name)
return k
kappa = _calculate_k(po, pe_row, pe_col, name='value')
update_op = _calculate_k(
update_po, update_pe_row, update_pe_col, name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, kappa)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return kappa, update_op
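# --- Editor's sketch (not part of the original module): the kappa formula
# quoted in the docstring above, applied with NumPy to a small made-up
# confusion matrix:
#   pe = (pe_row * pe_col) / N ;  kappa = (sum(po) - sum(pe)) / (N - sum(pe))
import numpy as np

_kappa_cm = np.array([[20., 5.],
                      [3., 12.]])       # rows: labels, cols: predictions
_n_total = _kappa_cm.sum()
_po_sum = np.diag(_kappa_cm).sum()      # observed agreement (counts)
_pe_sum = (_kappa_cm.sum(axis=0) * _kappa_cm.sum(axis=1)).sum() / _n_total
_kappa = (_po_sum - _pe_sum) / (_n_total - _pe_sum)
# Equivalent to the textbook form kappa = (p_o - p_e) / (1 - p_e) with
# p_o = _po_sum / N and p_e = _pe_sum / N.
assert np.isclose(_kappa,
                  (_po_sum / _n_total - _pe_sum / _n_total) /
                  (1 - _pe_sum / _n_total))
# --- end sketch ---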
__all__ = [
'auc_with_confidence_intervals',
'aggregate_metric_map',
'aggregate_metrics',
'cohen_kappa',
'count',
'precision_recall_at_equal_thresholds',
'recall_at_precision',
'sparse_recall_at_top_k',
'streaming_accuracy',
'streaming_auc',
'streaming_curve_points',
'streaming_dynamic_auc',
'streaming_false_negative_rate',
'streaming_false_negative_rate_at_thresholds',
'streaming_false_negatives',
'streaming_false_negatives_at_thresholds',
'streaming_false_positive_rate',
'streaming_false_positive_rate_at_thresholds',
'streaming_false_positives',
'streaming_false_positives_at_thresholds',
'streaming_mean',
'streaming_mean_absolute_error',
'streaming_mean_cosine_distance',
'streaming_mean_iou',
'streaming_mean_relative_error',
'streaming_mean_squared_error',
'streaming_mean_tensor',
'streaming_percentage_less',
'streaming_precision',
'streaming_precision_at_thresholds',
'streaming_recall',
'streaming_recall_at_k',
'streaming_recall_at_thresholds',
'streaming_root_mean_squared_error',
'streaming_sensitivity_at_specificity',
'streaming_sparse_average_precision_at_k',
'streaming_sparse_average_precision_at_top_k',
'streaming_sparse_precision_at_k',
'streaming_sparse_precision_at_top_k',
'streaming_sparse_recall_at_k',
'streaming_specificity_at_sensitivity',
'streaming_true_negatives',
'streaming_true_negatives_at_thresholds',
'streaming_true_positives',
'streaming_true_positives_at_thresholds',
]
|
apache-2.0
|
QuantSoftware/QuantSoftwareToolkit
|
QSTK/qstklearn/1knn.py
|
7
|
9098
|
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Feb 20, 2011
@author: John Cornwell
@organization: Georgia Institute of Technology
@contact: [email protected]
@summary: This is an implementation of the 1-KNN algorithm for ranking features quickly.
It uses the knn implementation.
@status: oneKNN functions correctly, optimized to use n^2/2 algorithm.
'''
import matplotlib.pyplot as plt
from pylab import gca
import itertools
import string
import numpy as np
import math
import knn
from time import clock
'''
@summary: Query function for 1KNN, return value is a double between 0 and 1.
@param naData: A 2D numpy array. Each row is a data point with the final column containing the classification.
'''
def oneKnn( naData ):
if naData.ndim != 2:
raise Exception( "Data should have two dimensions" )
lLen = naData.shape[0]
''' # of dimensions, subtract one for classification '''
lDim = naData.shape[1] - 1
''' Start best distances as very large '''
ldDistances = [1E300] * lLen
llIndexes = [-1] * lLen
dDistance = 0.0;
''' Loop through finding closest neighbors '''
for i in range( lLen ):
for j in range( i+1, lLen ):
dDistance = 0.0
for k in range( 0, lDim ):
dDistance += (naData[i][k] - naData[j][k])**2
dDistance = math.sqrt( dDistance )
''' Two distances to check, for i's best, and j's best '''
if dDistance < ldDistances[i]:
ldDistances[i] = dDistance
llIndexes[i] = j
if dDistance < ldDistances[j]:
ldDistances[j] = dDistance
llIndexes[j] = i
lCount = 0
''' Now count # of matching pairs '''
for i in range( lLen ):
if naData[i][-1] == naData[ llIndexes[i] ][-1]:
lCount = lCount + 1
return float(lCount) / lLen
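''' Editor's sketch (not part of the original module): oneKnn on two
    well-separated synthetic classes should score close to 1.0, since each
    point's nearest neighbor then shares its class label. The data below is
    made up purely for illustration. '''
def _sketchOneKnnUsage():
    naClass0 = np.hstack( (np.random.normal( 0.0, .1, [50, 2] ), np.zeros( (50, 1) )) )
    naClass1 = np.hstack( (np.random.normal( 5.0, .1, [50, 2] ), np.ones( (50, 1) )) )
    dScore = oneKnn( np.vstack( (naClass0, naClass1) ) )
    assert dScore > 0.9
    return dScore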
''' Test function to plot results '''
def _plotResults( naDist1, naDist2, lfOneKnn, lf5Knn ):
plt.clf()
plt.subplot(311)
plt.scatter( naDist1[:,0], naDist1[:,1] )
plt.scatter( naDist2[:,0], naDist2[:,1], color='r' )
#plt.ylabel( 'Feature 2' )
#plt.xlabel( 'Feature 1' )
#gca().annotate( '', xy=( .8, 0 ), xytext=( -.3 , 0 ), arrowprops=dict(facecolor='red', shrink=0.05) )
gca().annotate( '', xy=( .7, 0 ), xytext=( 1.5 , 0 ), arrowprops=dict(facecolor='black', shrink=0.05) )
plt.title( 'Data Distribution' )
plt.subplot(312)
plt.plot( range( len(lfOneKnn) ), lfOneKnn )
plt.ylabel( '1-KNN Value' )
#plt.xlabel( 'Distribution Merge' )
plt.title( '1-KNN Performance' )
plt.subplot(313)
plt.plot( range( len(lf5Knn) ), lf5Knn )
plt.ylabel( '% Correct Classification' )
#plt.xlabel( 'Distribution Merge' )
plt.title( '5-KNN Performance' )
plt.subplots_adjust()
plt.show()
''' Function to plot 2 distributions '''
def _plotDist( naDist1, naDist2, i ):
plt.clf()
plt.scatter( naDist1[:,0], naDist1[:,1] )
plt.scatter( naDist2[:,0], naDist2[:,1], color='r' )
plt.ylabel( 'Feature 2' )
plt.xlabel( 'Feature 1' )
plt.title( 'Iteration ' + str(i) )
plt.show()
''' Function to test KNN performance '''
def _knnResult( naData ):
''' Split up data into training/testing '''
    lSplit = int( naData.shape[0] * .7 )
naTrain = naData[:lSplit, :]
naTest = naData[lSplit:, :]
knn.addEvidence( naTrain.astype(float), 1 );
''' Query with last column omitted and 5 nearest neighbors '''
naResults = knn.query( naTest[:,:-1], 5, 'mode')
''' Count returns which are correct '''
lCount = 0
for i, dVal in enumerate(naResults):
if dVal == naTest[i,-1]:
lCount = lCount + 1
dResult = float(lCount) / naResults.size
return dResult
''' Tests performance of 1-KNN '''
def _test1():
''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance '''
for i in range(3):
''' Select one of three distributions '''
if i == 0:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] )
naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )
elif i == 1:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] )
naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )
else:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] )
naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) )
naOrig = np.vstack( (naTest1, naTest2) )
naBoth = np.vstack( (naTest1, naTest2) )
''' Keep track of runtimes '''
t = clock()
cOneRuntime = t-t;
cKnnRuntime = t-t;
lfResults = []
lfKnnResults = []
for i in range( 15 ):
#_plotDist( naTest1, naBoth[100:,:], i )
t = clock()
lfResults.append( oneKnn( naBoth ) )
cOneRuntime = cOneRuntime + (clock() - t)
t = clock()
lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) )
cKnnRuntime = cKnnRuntime + (clock() - t)
naBoth[500:,0] = naBoth[500:,0] - .1
print 'Runtime OneKnn:', cOneRuntime
print 'Runtime 5-KNN:', cKnnRuntime
_plotResults( naTest1, naTest2, lfResults, lfKnnResults )
''' Tests 1-KNN feature ranking on combinations of features '''
def _test2():
    ''' Generate five random feature distributions to compare 1-KNN values with 5-KNN learner performance '''
np.random.seed( 12345 )
''' Create 5 distributions for each of the 5 attributes '''
dist1 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist2 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist3 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist4 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist5 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
lDists = [ dist1, dist2, dist3, dist4, dist5 ]
''' All features used except for distribution 4 '''
distY = np.sin( dist1 ) + np.sin( dist2 ) + np.sin( dist3 ) + np.sin( dist5 )
distY = distY.reshape( -1, 1 )
for i, fVal in enumerate( distY ):
if fVal >= 0:
distY[i] = 1
else:
distY[i] = 0
for i in range( 1, 6 ):
lsNames = []
lf1Vals = []
lfVals = []
for perm in itertools.combinations( '12345', i ):
''' set test distribution to first element '''
naTest = lDists[ int(perm[0]) - 1 ]
sPerm = perm[0]
''' stack other distributions on '''
for j in range( 1, len(perm) ):
sPerm = sPerm + str(perm[j])
naTest = np.hstack( (naTest, lDists[ int(perm[j]) - 1 ] ) )
''' finally stack y values '''
naTest = np.hstack( (naTest, distY) )
lf1Vals.append( oneKnn( naTest ) )
lfVals.append( _knnResult( np.random.permutation(naTest) ) )
lsNames.append( sPerm )
''' Plot results '''
plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' )
plt2 = plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b' )
plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') )
plt.ylabel('1-KNN Value/KNN Classification')
plt.xlabel('Feature Set')
plt.title('Combinations of ' + str(i) + ' Features')
plt.ylim( (0,1) )
if len(lf1Vals) < 2:
plt.xlim( (-1,1) )
gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 )
gca().xaxis.set_ticklabels( lsNames )
plt.show()
if __name__ == '__main__':
_test1()
#_test2()
|
bsd-3-clause
|
jlegendary/scikit-learn
|
examples/classification/plot_classifier_comparison.py
|
181
|
4699
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
|
bsd-3-clause
|
roxyboy/scikit-learn
|
benchmarks/bench_tree.py
|
297
|
3617
|
"""
To run this, you'll need to have the following installed:
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
numenta/htmresearch
|
projects/sdr_paper/pytorch_experiments/analyze_nonzero.py
|
2
|
7493
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import logging
import multiprocessing
import os
import numpy as np
import torch
from torchvision import datasets, transforms
from tqdm import tqdm
from htmresearch.frameworks.pytorch.benchmark_utils import (
register_nonzero_counter, unregister_counter_nonzero)
from htmresearch.frameworks.pytorch.image_transforms import RandomNoise
from htmresearch.frameworks.pytorch.model_utils import evaluateModel
logging.basicConfig(level=logging.ERROR)
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from tabulate import tabulate
import pandas as pd
from htmresearch.frameworks.pytorch.mnist_sparse_experiment import \
MNISTSparseExperiment
def analyzeWeightPruning(args):
"""
Multiprocess function used to analyze the impact of nonzeros and accuracy
after pruning low weights and units with low dutycycle of a pre-trained model.
:param args: tuple with the following arguments:
- experiment path: The experiment results path
- configuration parameters: The parameters used in the experiment run
- minWeight: min weight to prune. If zero then no pruning
- minDutycycle: min threshold to prune. If less than zero then no pruning
                 - progress bar position: position of this task's progress bar
:type args: tuple
:return: Panda DataFrame with the nonzero count for every weight variable in
the model and the evaluation results after the pruning the weights.
:rtype: :class:`pandas.DataFrame`
"""
path, params, minWeight, minDutycycle, position = args
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Dataset transformations used during training. See mnist_sparse_experiment.py
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
# Initialize MNIST test dataset for this experiment
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(params["datadir"], train=False, download=True,
transform=transform),
batch_size=params["test_batch_size"], shuffle=True)
# Load pre-trained model and evaluate with test dataset
model = torch.load(os.path.join(path, "model.pt"), map_location=device)
label = str(minWeight)
name = params["name"]
desc = "{}.minW({}).minD({})".format(name, minWeight, minDutycycle)
model.pruneWeights(minWeight)
model.pruneDutycycles(minDutycycle)
# Collect nonzero
nonzero = {}
register_nonzero_counter(model, nonzero)
results = evaluateModel(model=model, loader=test_loader, device=device,
progress={"desc": desc, "position": position})
unregister_counter_nonzero(model)
# Create table with results
table = pd.DataFrame.from_dict(nonzero)
noise_score = results["total_correct"]
table = table.assign(accuracy=results["accuracy"])
# Compute noise score
noise_values = tqdm([0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5],
position=position)
for noise in noise_values:
noise_values.set_description("{}.noise({})".format(desc, noise))
# Add noise to dataset transforms
transform.transforms.append(
RandomNoise(noise, whiteValue=0.1307 + 2 * 0.3081))
# Evaluate model with noise
results = evaluateModel(model=model, loader=test_loader, device=device)
# Remove noise from dataset transforms
transform.transforms.pop()
# Update noise score
noise_score += results["total_correct"]
table = table.assign(noise_score=noise_score)
# Filter result for the 'weight' variable only
table = pd.DataFrame({label: table.xs("weight")})
table.drop(["input", "output"], inplace=True)
table.dropna(inplace=True)
return table
def plotDataframe(table, title, plotPath):
"""
  Plot a Pandas dataframe.
  :param table: Pandas dataframe returned by :func:`analyzeWeightPruning`
:type table: :class:`pandas.DataFrame`
:param title: Plot title
:type title: str
:param plotPath: Plot full path
:type plotPath: str
"""
plt.figure()
axes = table.T.plot(subplots=True, sharex=True, grid=True, legend=True,
title=title, figsize=(8, 11))
# Use fixed scale for "accuracy"
accuracy = next(ax for ax in axes if ax.lines[0].get_label() == 'accuracy')
accuracy.set_ylim(0.0, 1.0)
plt.savefig(plotPath)
plt.close()
def run(pool, expName, name, args):
"""
  Runs :func:`analyzeWeightPruning` in parallel and saves the results
  :param pool: multiprocessing pool
  :param expName: Experiment name
  :param name: File/Plot name (e.g. 'weight_pruning')
  :param args: Argument list to be passed to :func:`analyzeWeightPruning`
  :return: Pandas dataframe with all the results
"""
tables = pool.map(analyzeWeightPruning, args)
merged = pd.concat(tables, axis=1).sort_index(axis=1)
filename = "{}_{}".format(name, expName)
plotDataframe(merged, filename, "{}.pdf".format(filename))
print()
print(filename)
print(tabulate(merged, headers='keys', tablefmt='fancy_grid',
numalign="right"))
merged.to_csv("{}.csv".format(filename))
return merged
def main():
# Initialize experiment options and parameters
suite = MNISTSparseExperiment()
suite.parse_opt()
suite.parse_cfg()
experiments = suite.options.experiments or suite.cfgparser.sections()
pool = multiprocessing.Pool()
for expName in experiments:
path = suite.get_exp(expName)[0]
results = suite.get_exps(path=path)
# Build argument list for multiprocessing pool
args = []
for exp in results:
params = suite.get_params(exp)
for i, minWeight in enumerate(np.linspace(0.0, 0.1, 21)):
args.append((exp, params, minWeight, -1, i))
args = np.array(args)
# Analyze weight pruning alone. No dutycycle pruning
args[:, 3] = -1 # set minDutycycle to -1 for all experiments
run(pool, expName, "Weight Pruning", args)
# Analyze dutycycle pruning units with dutycycle below 5% from target density
args[:, 3] = 0.05 # set minDutycycle to 5% for all experiments
run(pool, expName, "Dutycycle Pruning (5%)", args)
# Analyze dutycycle pruning units with dutycycle below 10% from target density
args[:, 3] = 0.10 # set minDutycycle to 10% for all experiments
run(pool, expName, "Dutycycle Pruning (10%)", args)
if __name__ == '__main__':
main()
|
agpl-3.0
|
jkarnows/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
132
|
25860
|
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non-centered, sparse centers to check the handling of both dense and sparse data
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
    # pure numpy implementation used as an easily auditable reference (gold)
    # implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    # check that the number of cluster centers and distinct labels match
    # the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that an error is raised if the precompute_distances flag is not
    # supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans works well even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
    # than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
    # Give a perfect initialization, but a large reassignment_ratio,
    # as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
    # Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
    # is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
    # check that my_X has been de-centered back to (approximately) the original X
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
    # Check that k_means with a bad initialization does not yield a singleton.
    # Starting with bad centers that are quickly ignored should not
    # result in a repositioning of the centers to the center of mass, which
    # would lead to collapsed centers and in turn make the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
    # predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
    # predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
    # Check that increasing the number of initializations increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
    centers = cluster_centers
    assert_equal(centers.shape, (n_clusters, n_features))
    assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
|
bsd-3-clause
|
IshitaTakeshi/SCW
|
datasets.py
|
1
|
1754
|
from os.path import exists, join
import numpy as np
from sklearn import datasets
from sklearn.utils import shuffle
import utils
dataset_dir = "datasets"
def fetch_mnist(training_ratio=0.8, data_home="."):
mnist = datasets.fetch_mldata('MNIST original', data_home=data_home)
X, y = shuffle(mnist.data, mnist.target)
splitter = int(len(X)*training_ratio)
training = X[:splitter], y[:splitter]
test = X[splitter:], y[splitter:]
return training, test
def download_mnist():
training, test = fetch_mnist(data_home=dataset_dir)
X, y = training
datasets.dump_svmlight_file(X, y, join(dataset_dir, "mnist"))
X, y = test
datasets.dump_svmlight_file(X, y, join(dataset_dir, "mnist.t"))
def load_mnist():
def pick(X, y):
indices = np.logical_or(y==0, y==1)
X = X.todense()
X = X[indices]
y = y[indices]
y = utils.overwrite_labels(y)
return X, y
n_features = 784
training_path = join(dataset_dir, "mnist")
test_path = join(dataset_dir, "mnist.t")
if not exists(training_path) or not exists(test_path):
download_mnist()
X, y = datasets.load_svmlight_file(training_path, n_features=n_features)
training = pick(X, y)
X, y = datasets.load_svmlight_file(test_path, n_features=n_features)
test = pick(X, y)
return training, test
def make_classification():
X, y = datasets.make_classification(n_samples=5000, n_features=100,
n_classes=2)
y = utils.overwrite_labels(y) # (0, 1) to (-1, 1)
return X, y
def load_digits():
digits = datasets.load_digits(2)
y = np.array(digits.target, copy=True)
y = utils.overwrite_labels(y)
return digits.data, y
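# Hedged usage sketch: load the binary (digit 0 vs digit 1) MNIST subset and a
# synthetic problem; labels are mapped from {0, 1} to {-1, 1} by
# utils.overwrite_labels, as noted above.
#   (X_train, y_train), (X_test, y_test) = load_mnist()
#   X, y = make_classification()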
|
mit
|
SciTools/tephi
|
tephi/isopleths.py
|
1
|
14880
|
# Copyright Tephi contributors
#
# This file is part of Tephi and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Tephigram isopleth support for generating and plotting tephigram lines,
environment profiles and barbs.
"""
import math
from matplotlib.collections import PathCollection
from matplotlib.path import Path
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import numpy as np
from scipy.interpolate import interp1d
from ._constants import CONST_CP, CONST_L, CONST_KELVIN, CONST_RD, CONST_RV
from . import transforms
# Wind barb speed (knots) ranges used since 1 January 1955.
_BARB_BINS = np.arange(20) * 5 + 3
_BARB_GUTTER = 0.1
_BARB_DTYPE = np.dtype(
dict(
names=("speed", "angle", "pressure", "barb"),
        formats=("f4", "f4", "f4", object),
)
)
#
# Reference: http://www-nwp/~hadaa/tephigram/tephi_plot.html
#
def mixing_ratio(
min_pressure, max_pressure, axes, transform, kwargs, mixing_ratio_value
):
"""
Generate and plot a humidity mixing ratio line.
A line of constant saturation mixing ratio with respect to a
plane water surface (g kg-1).
Args:
* min_pressure:
        Minimum pressure, in mb or hPa, for the mixing ratio line extent.
* max_pressure:
Maximum pressure, in mb or hPa, for the mixing ratio line extent.
* axes:
Tephigram plotting :class:`matplotlib.axes.AxesSubplot` instance.
* transform:
Tephigram plotting transformation
:class:`matplotlib.transforms.CompositeGenericTransform` instance.
* kwargs:
Keyword arguments for the mixing ratio :class:`matplotlib.lines.Line2D`
instance.
* mixing_ratio_value:
The mixing ratio value to be plotted.
Returns:
The mixing ratio :class:`matplotlib.lines.Line2D` instance.
"""
pressures = np.linspace(min_pressure, max_pressure, 100)
temps = transforms.convert_pw2T(pressures, mixing_ratio_value)
_, thetas = transforms.convert_pT2Tt(pressures, temps)
(line,) = axes.plot(temps, thetas, transform=transform, **kwargs)
return line
def isobar(min_theta, max_theta, axes, transform, kwargs, pressure):
"""
Generate and plot an isobar line.
A line of constant pressure (mb).
Args:
* min_theta:
Minimum potential temperature, in degC, for the isobar extent.
* max_theta:
Maximum potential temperature, in degC, for the isobar extent.
* axes:
Tephigram plotting :class:`matplotlib.axes.AxesSubplot` instance.
* transform:
Tephigram plotting transformation
:class:`matplotlib.transforms.CompositeGenericTransform` instance.
* kwargs:
Keyword arguments for the isobar :class:`matplotlib.lines.Line2D`
instance.
* pressure:
The isobar pressure value, in mb or hPa, to be plotted.
Returns:
The isobar :class:`matplotlib.lines.Line2D` instance.
"""
steps = 100
thetas = np.linspace(min_theta, max_theta, steps)
_, temps = transforms.convert_pt2pT([pressure] * steps, thetas)
(line,) = axes.plot(temps, thetas, transform=transform, **kwargs)
return line
def _wet_adiabat_gradient(min_temperature, pressure, temperature, dp):
"""
Calculate the wet adiabat change in pressure and temperature.
Args:
* min_temperature:
Minimum potential temperature, in degC, for the wet adiabat line
extent.
* pressure:
Pressure point value, in mb or hPa, from which to calculate the
gradient difference.
* temperature:
Potential temperature point value, in degC, from which to calculate
the gradient difference.
* dp:
The wet adiabat change in pressure, in mb or hPa, from which to
calculate the gradient difference.
Returns:
The gradient change as a pressure, potential temperature value pair.
"""
# TODO: Discover the meaning of the magic numbers.
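    # The constants below appear to come from the standard saturation
    # mixing-ratio (Clausius-Clapeyron) formulation: 6.11 is approximately the
    # saturation vapour pressure over water at 0 degC in mb (hPa), and 0.622
    # is epsilon = Rd / Rv (the ratio of the specific gas constants of dry air
    # and water vapour), equivalently the ratio of the molar mass of water
    # vapour to that of dry air.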
kelvin = temperature + CONST_KELVIN
lsbc = (CONST_L / CONST_RV) * ((1.0 / CONST_KELVIN) - (1.0 / kelvin))
rw = 6.11 * np.exp(lsbc) * (0.622 / pressure)
lrwbt = (CONST_L * rw) / (CONST_RD * kelvin)
nume = ((CONST_RD * kelvin) / (CONST_CP * pressure)) * (1.0 + lrwbt)
deno = 1.0 + (lrwbt * ((0.622 * CONST_L) / (CONST_CP * kelvin)))
gradi = nume / deno
dt = dp * gradi
if (temperature + dt) < min_temperature:
dt = min_temperature - temperature
dp = dt / gradi
return dp, dt
def wet_adiabat(
max_pressure, min_temperature, axes, transform, kwargs, temperature
):
"""
Generate and plot a pseudo saturated wet adiabat line.
A line of constant equivalent potential temperature for saturated
air parcels (degC).
Args:
* max_pressure:
Maximum pressure, in mb or hPa, for the wet adiabat line extent.
* min_temperature:
Minimum potential temperature, in degC, for the wet adiabat line
extent.
* axes:
Tephigram plotting :class:`matplotlib.axes.AxesSubplot` instance.
* transform:
Tephigram plotting transformation
:class:`matplotlib.transforms.CompositeGenericTransform` instance.
* kwargs:
        Keyword arguments for the wet adiabat :class:`matplotlib.lines.Line2D`
        instance.
* temperature:
The wet adiabat value, in degC, to be plotted.
Returns:
The wet adiabat :class:`matplotlib.lines.Line2D` instance.
"""
temps = [temperature]
pressures = [max_pressure]
dp = -5.0
for i in range(200):
dp, dt = _wet_adiabat_gradient(
min_temperature, pressures[i], temps[i], dp
)
temps.append(temps[i] + dt)
pressures.append(pressures[i] + dp)
_, thetas = transforms.convert_pT2Tt(pressures, temps)
(line,) = axes.plot(temps, thetas, transform=transform, **kwargs)
return line
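# Hedged usage sketch: the isopleth helpers above are normally invoked by
# tephi's plotting machinery, which supplies a tephigram axes and its
# composite transform; called directly they only need those two objects plus
# a kwargs dict for the Line2D styling, e.g. (values hypothetical):
#   transform = axes.tephigram_transform + axes.transData
#   isobar(-40, 40, axes, transform, dict(color="grey", linewidth=0.5), 850.0)
#   wet_adiabat(1000.0, -50.0, axes, transform, dict(color="orange"), 20.0)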
class Barbs:
"""Generate a wind arrow barb."""
def __init__(self, axes):
"""
Create a wind arrow barb for the given axes.
Args:
* axes:
A :class:`matplotlib.axes.AxesSubplot` instance.
"""
self.axes = axes
self.barbs = None
self._gutter = None
self._transform = axes.tephigram_transform + axes.transData
self._kwargs = None
self._custom_kwargs = None
self._custom = dict(
color=["barbcolor", "color", "edgecolor", "facecolor"],
linewidth=["lw", "linewidth"],
linestyle=["ls", "linestyle"],
)
@staticmethod
def _uv(magnitude, angle):
"""
Convert magnitude and angle measured in degrees to u and v components,
where u is -x and v is -y.
"""
angle = angle % 360
u = v = 0
        # Snap the magnitude of the barb vector to fall into one of the
        # _BARB_BINS ensuring it's a multiple of five. Five is the increment
        # step size for decorating the barb with flags.
magnitude = np.searchsorted(_BARB_BINS, magnitude, side="right") * 5
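        # Worked example: a speed of 17 knots falls between bins 13 and 18,
        # so searchsorted(..., side="right") returns 3 and the barb is drawn
        # for 3 * 5 = 15 knots.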
modulus = angle % 90
if modulus:
quadrant = int(angle / 90)
radians = math.radians(modulus)
y = math.cos(radians) * magnitude
x = math.sin(radians) * magnitude
if quadrant == 0:
u, v = -x, -y
elif quadrant == 1:
u, v = -y, x
elif quadrant == 2:
u, v = x, y
else:
u, v = y, -x
else:
angle = int(angle)
if angle == 0:
v = -magnitude
elif angle == 90:
u = -magnitude
elif angle == 180:
v = magnitude
else:
u = magnitude
return u, v
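    # Worked examples for _uv: angle 0 gives (u, v) = (0, -magnitude) and
    # angle 90 gives (-magnitude, 0); a 20 knot barb at 45 degrees falls in
    # quadrant 0 with x = y ~= 14.14, giving (u, v) ~= (-14.14, -14.14).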
def _make_barb(self, temperature, theta, speed, angle):
"""Add the barb to the plot at the specified location."""
u, v = self._uv(speed, angle)
if 0 < speed < _BARB_BINS[0]:
# Plot the missing barbless 1-2 knots line.
length = self._kwargs["length"]
pivot_points = dict(tip=0.0, middle=-length / 2.0)
pivot = self._kwargs.get("pivot", "tip")
offset = pivot_points[pivot]
verts = [(0.0, offset), (0.0, length + offset)]
rangle = math.radians(-angle)
verts = mtransforms.Affine2D().rotate(rangle).transform(verts)
codes = [Path.MOVETO, Path.LINETO]
path = Path(verts, codes)
size = length ** 2 / 4
xy = np.array([[temperature, theta]])
barb = PathCollection(
[path],
(size,),
offsets=xy,
transOffset=self._transform,
**self._custom_kwargs,
)
barb.set_transform(mtransforms.IdentityTransform())
self.axes.add_collection(barb)
else:
barb = plt.barbs(
temperature,
theta,
u,
v,
transform=self._transform,
**self._kwargs,
)
return barb
def refresh(self):
"""Refresh the plot with the barbs."""
if self.barbs is not None:
xlim = self.axes.get_xlim()
ylim = self.axes.get_ylim()
y = np.linspace(*ylim)[::-1]
xdelta = xlim[1] - xlim[0]
x = np.ones(y.size) * (xlim[1] - (xdelta * self._gutter))
xy = np.column_stack((x, y))
points = self.axes.tephigram_inverse.transform(xy)
temperature, theta = points[:, 0], points[:, 1]
pressure, _ = transforms.convert_Tt2pT(temperature, theta)
min_pressure, max_pressure = np.min(pressure), np.max(pressure)
func = interp1d(pressure, temperature)
for i, (speed, angle, pressure, barb) in enumerate(self.barbs):
if min_pressure < pressure < max_pressure:
p2T = func(pressure)
temperature, theta = transforms.convert_pT2Tt(
pressure, p2T
)
if barb is None:
self.barbs[i]["barb"] = self._make_barb(
temperature, theta, speed, angle
)
else:
barb.set_offsets(np.array([[temperature, theta]]))
barb.set_visible(True)
else:
if barb is not None:
barb.set_visible(False)
def plot(self, barbs, **kwargs):
"""
Plot the sequence of barbs.
Args:
* barbs:
            Sequence of speed, direction and pressure value triples for
            each barb, where speed is measured in units of knots, direction
            in units of degrees (clockwise from north), and pressure must
            be in units of mb or hPa.
Kwargs:
* gutter:
Proportion offset from the right hand side axis to plot the
barbs. Defaults to 0.1
Also see :func:`matplotlib.pyplot.barbs`
"""
self._gutter = kwargs.pop("gutter", _BARB_GUTTER)
self._kwargs = dict(length=7, zorder=10)
self._kwargs.update(kwargs)
self._custom_kwargs = dict(
color=None, linewidth=1.5, zorder=self._kwargs["zorder"]
)
for key, values in self._custom.items():
common = set(values).intersection(kwargs)
if common:
self._custom_kwargs[key] = kwargs[sorted(common)[0]]
if hasattr(barbs, "__next__"):
barbs = list(barbs)
barbs = np.asarray(barbs)
if barbs.ndim != 2 or barbs.shape[-1] != 3:
msg = (
"The barbs require to be a sequence of wind speed, "
"wind direction and pressure value triples."
)
raise ValueError(msg)
self.barbs = np.empty(barbs.shape[0], dtype=_BARB_DTYPE)
for i, barb in enumerate(barbs):
self.barbs[i] = tuple(barb) + (None,)
self.refresh()
class Profile:
"""Generate an environmental lapse rate profile."""
def __init__(self, data, axes):
"""
Create an environmental lapse rate profile from the sequence of
pressure and temperature point data.
Args:
* data:
Sequence of pressure and temperature points defining the
environmental lapse rate.
* axes:
The axes on which to plot the profile.
"""
if hasattr(data, "__next__"):
data = list(data)
self.data = np.asarray(data)
if self.data.ndim != 2 or self.data.shape[-1] != 2:
msg = (
"The environment profile data requires to be a sequence "
"of pressure, temperature value pairs."
)
raise ValueError(msg)
self.axes = axes
self._transform = axes.tephigram_transform + axes.transData
self.pressure = self.data[:, 0]
self.temperature = self.data[:, 1]
_, self.theta = transforms.convert_pT2Tt(
self.pressure, self.temperature
)
self.line = None
self._barbs = Barbs(axes)
def plot(self, **kwargs):
"""
Plot the environmental lapse rate profile.
Kwargs:
See :func:`matplotlib.pyplot.plot`.
Returns:
The profile :class:`matplotlib.lines.Line2D`
"""
if self.line is not None and self.line in self.axes.lines:
self.axes.lines.remove(self.line)
if "zorder" not in kwargs:
kwargs["zorder"] = 10
(self.line,) = self.axes.plot(
self.temperature, self.theta, transform=self._transform, **kwargs
)
return self.line
def refresh(self):
"""Refresh the plot with the profile and any associated barbs."""
self._barbs.refresh()
def barbs(self, barbs, **kwargs):
"""
Plot the sequence of barbs associated with this profile.
Args:
* barbs:
            Sequence of speed, direction and pressure value triples for
            each barb, where speed is measured in units of knots, direction
            in units of degrees (clockwise from north), and pressure must
            be in units of mb or hPa.
Kwargs:
See :func:`matplotlib.pyplot.barbs`
"""
colors = ["color", "barbcolor", "edgecolor", "facecolor"]
if not set(colors).intersection(kwargs):
kwargs["color"] = self.line.get_color()
self._barbs.plot(barbs, **kwargs)
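# Hedged usage sketch for Profile (assumes a tephigram axes created by tephi,
# which provides the tephigram_transform attribute used above):
#   profile = Profile(zip(pressures, temperatures), tephi_axes)
#   profile.plot(color="red")
#   profile.barbs([(25, 240, 850), (35, 260, 700)])  # knots, degrees, mb/hPa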
|
lgpl-3.0
|
cl4rke/scikit-learn
|
sklearn/covariance/robust_covariance.py
|
198
|
29735
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
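# Hedged usage sketch for c_step on toy data (it forwards to _c_step, so the
# return value is the 5-tuple built below):
#   rng = np.random.RandomState(0)
#   X_toy = rng.randn(100, 3)
#   location, covariance, det, support, dist = c_step(X_toy, n_support=60,
#                                                     random_state=rng)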
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
        (2 is enough to be close to the final solution; convergence "never"
        needs more than 20 iterations).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
        raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
                        "integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
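# Hedged sketch of the two accepted forms of `n_trials` described above
# (the warm-start names are hypothetical):
#   select_candidates(X, n_support, n_trials=30, select=10)   # random starts
#   select_candidates(X, n_support,
#                     n_trials=(initial_locations, initial_covariances),
#                     select=1)                                # warm starts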
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        `[n_samples + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets of the data
    before pooling them into larger subsets, and finally into the full data
    set. Depending on the size of the initial sample, there are one, two or
    three such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big: retry with far fewer candidates per
            # subset (less optimal, but much smaller arrays), shrinking the
            # totals before allocating again.
            n_best_sub = 2
            n_best_tot = n_subsets * n_best_sub
            all_best_locations = np.zeros((n_best_tot, n_features))
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
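# Hedged usage sketch: fast_mcd returns the *raw* MCD estimates; the MinCovDet
# estimator below adds the correction and re-weighting steps.
#   rng = np.random.RandomState(42)
#   X_toy = rng.randn(200, 2)
#   location, covariance, support, dist = fast_mcd(X_toy, random_state=rng)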
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
    assume_centered : bool
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful to work with data whose mean is close to, but not exactly,
        zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        (n_samples + n_features + 1) / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1999]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
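# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It fits MinCovDet on contaminated synthetic data and reads back
# the robust estimates. The toy data and the contamination scheme are
# illustrative assumptions; the printed attributes are the ones documented
# in the class docstring above.
if __name__ == "__main__":
    import numpy as np  # re-imported locally so the sketch is self-contained
    rng = np.random.RandomState(0)
    # 200 samples from a correlated Gaussian, the first 10 shifted to act as
    # gross outliers.
    X_demo = rng.multivariate_normal(mean=[0., 0.],
                                     cov=[[1., .3], [.3, 1.]], size=200)
    X_demo[:10] += 6.
    mcd = MinCovDet(random_state=0).fit(X_demo)
    print(mcd.location_)            # robust location estimate
    print(mcd.covariance_)          # re-weighted robust covariance
    print(int(mcd.support_.sum()))  # observations kept in the robust support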
|
bsd-3-clause
|
nmayorov/scikit-learn
|
examples/decomposition/plot_pca_vs_fa_model_selection.py
|
29
|
4470
|
"""
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from Thomas P. Minka,
"Automatic Choice of Dimensionality for PCA", NIPS 2000: 598-604,
is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
|
bsd-3-clause
|
cybernet14/scikit-learn
|
examples/feature_selection/plot_feature_selection.py
|
249
|
2827
|
"""
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
|
bsd-3-clause
|
Adai0808/scikit-learn
|
benchmarks/bench_plot_svd.py
|
325
|
2899
|
"""Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
        # dummy point plot to stick the legend to since surface plots do not
        # support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
|
bsd-3-clause
|
nmartensen/pandas
|
pandas/tests/io/parser/test_read_fwf.py
|
11
|
16032
|
# -*- coding: utf-8 -*-
"""
Tests the 'read_fwf' function in parsers.py. This
test suite is independent of the others because the
engine is set to 'python-fwf' internally.
"""
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame
from pandas import compat
from pandas.compat import StringIO, BytesIO
from pandas.io.parsers import read_csv, read_fwf, EmptyDataError
class TestFwfParsing(object):
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = read_csv(StringIO(data_expected),
engine='python', header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From Thomas Kluyver: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assert_raises_regex(ValueError,
"must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assert_raises_regex(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_BytesIO_input(self):
if not compat.PY3:
pytest.skip(
"Bytes-related test - only needs to work on Python 3")
result = read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
def test_fwf_colspecs_is_list_or_tuple(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'column specifications must '
'be a list or tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(data),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'Each column specification '
'must be.+'):
read_fwf(StringIO(data), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
assert len(res)
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn",
"dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
pytest.skip("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = np.array([[1, 2., 4],
[5, np.nan, 10.]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = np.array([[1, 2334., 5],
[10, 13, 10]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_bool_header_arg(self):
# see gh-6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with pytest.raises(TypeError):
read_fwf(StringIO(data), header=arg)
def test_full_file(self):
# File with all values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 Keanu Reeves 9315.45 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 Jennifer Love Hewitt 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65 5000.00 2/5/2007
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = """
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r"""
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves
33+++122.33\\\bar.........Gerard Butler
++44~~~~12.01 baz~~Jennifer Love Hewitt
~~55 11+++foo++++Jada Pinkett-Smith
..66++++++.03~~~bar Bill Murray
""".strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
pytest.skip(
'Bytes-related test - only needs to work on Python 3')
test = """
שלום שלום
ום שלל
של ום
""".strip('\r\n')
expected = read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)],
header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(
BytesIO(test.encode('utf8')), header=None, encoding='utf8'))
def test_dtype(self):
data = """ a b c
1 2 3.2
3 4 5.2
"""
colspecs = [(0, 5), (5, 10), (10, None)]
result = pd.read_fwf(StringIO(data), colspecs=colspecs)
expected = pd.DataFrame({
'a': [1, 3],
'b': [2, 4],
'c': [3.2, 5.2]}, columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
expected['a'] = expected['a'].astype('float64')
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype('int32')
result = pd.read_fwf(StringIO(data), colspecs=colspecs,
dtype={'a': 'float64', 'b': str, 'c': 'int32'})
tm.assert_frame_equal(result, expected)
def test_skiprows_inference(self):
# GH11256
test = """
Text contained in the file header
DataCol1 DataCol2
0.0 1.0
101.6 956.1
""".strip()
expected = read_csv(StringIO(test), skiprows=2,
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=2))
def test_skiprows_by_index_inference(self):
test = """
To be skipped
Not To Be Skipped
Once more to be skipped
123 34 8 123
456 78 9 456
""".strip()
expected = read_csv(StringIO(test), skiprows=[0, 2],
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=[0, 2]))
def test_skiprows_inference_empty(self):
test = """
AA BBB C
12 345 6
78 901 2
""".strip()
with pytest.raises(EmptyDataError):
read_fwf(StringIO(test), skiprows=3)
def test_whitespace_preservation(self):
# Addresses Issue #16772
data_expected = """
a ,bbb
cc,dd """
expected = read_csv(StringIO(data_expected), header=None)
test_data = """
a bbb
ccdd """
result = read_fwf(StringIO(test_data), widths=[3, 3],
header=None, skiprows=[0], delimiter="\n\t")
tm.assert_frame_equal(result, expected)
def test_default_delimiter(self):
data_expected = """
a,bbb
cc,dd"""
expected = read_csv(StringIO(data_expected), header=None)
test_data = """
a \tbbb
cc\tdd """
result = read_fwf(StringIO(test_data), widths=[3, 3],
header=None, skiprows=[0])
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
marcocaccin/scikit-learn
|
sklearn/utils/tests/test_class_weight.py
|
90
|
12846
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
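# Hedged worked example (added for illustration; not part of the original
# test module). The "balanced" heuristic weights each class inversely to its
# frequency, roughly n_samples / (n_classes * count(class)). For
# y = [2, 2, 2, 3, 3, 4]: 6 samples, 3 classes, counts (3, 2, 1), so the
# expected weights are about [6/9, 6/6, 6/3] = [0.667, 1.0, 2.0].
def test_compute_class_weight_balanced_worked_example():
    y = np.asarray([2, 2, 2, 3, 3, 4])
    classes = np.unique(y)
    cw = compute_class_weight("balanced", classes, y)
    assert_array_almost_equal(cw, [2. / 3, 1., 2.])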
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
    # Test that results with class_weight="balanced" are invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
|
bsd-3-clause
|
mdrumond/tensorflow
|
tensorflow/examples/learn/wide_n_deep_tutorial.py
|
18
|
8111
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tempfile
import pandas as pd
from six.moves import urllib
import tensorflow as tf
CSV_COLUMNS = [
"age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"
]
gender = tf.feature_column.categorical_column_with_vocabulary_list(
"gender", ["Female", "Male"])
education = tf.feature_column.categorical_column_with_vocabulary_list(
"education", [
"Bachelors", "HS-grad", "11th", "Masters", "9th",
"Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th",
"Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
"Preschool", "12th"
])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
"marital_status", [
"Married-civ-spouse", "Divorced", "Married-spouse-absent",
"Never-married", "Separated", "Married-AF-spouse", "Widowed"
])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
"relationship", [
"Husband", "Not-in-family", "Wife", "Own-child", "Unmarried",
"Other-relative"
])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
"workclass", [
"Self-emp-not-inc", "Private", "State-gov", "Federal-gov",
"Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked"
])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.feature_column.categorical_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.feature_column.numeric_column("age")
education_num = tf.feature_column.numeric_column("education_num")
capital_gain = tf.feature_column.numeric_column("capital_gain")
capital_loss = tf.feature_column.numeric_column("capital_loss")
hours_per_week = tf.feature_column.numeric_column("hours_per_week")
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
gender, education, marital_status, relationship, workclass, occupation,
native_country, age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
["education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
[age_buckets, "education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
["native_country", "occupation"], hash_bucket_size=1000)
]
deep_columns = [
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(gender),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(native_country, dimension=8),
tf.feature_column.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
  print("Test data is downloaded to %s" % test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
if model_type == "wide":
m = tf.estimator.LinearClassifier(
model_dir=model_dir, feature_columns=base_columns + crossed_columns)
elif model_type == "deep":
m = tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=crossed_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(data_file, num_epochs, shuffle):
"""Input builder function."""
df_data = pd.read_csv(
tf.gfile.Open(data_file),
names=CSV_COLUMNS,
skipinitialspace=True,
engine="python",
skiprows=1)
# remove NaN elements
df_data = df_data.dropna(how="any", axis=0)
labels = df_data["income_bracket"].apply(lambda x: ">50K" in x).astype(int)
return tf.estimator.inputs.pandas_input_fn(
x=df_data,
y=labels,
batch_size=100,
num_epochs=num_epochs,
shuffle=shuffle,
num_threads=5)
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download(train_data, test_data)
  # Specify file path below if you want to find the output easily
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
m = build_estimator(model_dir, model_type)
# set num_epochs to None to get infinite stream of data.
m.train(
input_fn=input_fn(train_file_name, num_epochs=None, shuffle=True),
steps=train_steps)
# set steps to None to run evaluation until all data consumed.
results = m.evaluate(
input_fn=input_fn(test_file_name, num_epochs=1, shuffle=False),
steps=None)
print("model directory = %s" % model_dir)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
# Manual cleanup
shutil.rmtree(model_dir)
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=2000,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
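# Hedged usage note (added for illustration; not part of the original
# tutorial). Typical shell invocations of this script might look like the
# lines below; the flag values are examples only and simply exercise the
# argparse flags defined above.
#
#   python wide_n_deep_tutorial.py --model_type=wide_n_deep --train_steps=2000
#   python wide_n_deep_tutorial.py --model_type=wide --model_dir=/tmp/census_model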
|
apache-2.0
|
cg31/tensorflow
|
tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
|
10
|
12500
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import queue_runner
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [j % self._max
for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
return {self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
    self._max = len(list(ordered_dict_of_arrays.values())[0])
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [j % self._max
for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [j % self._max
for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
if self._epoch == self._num_epochs:
# trim this batch, so as not to overshoot the last epoch.
batch_end_inclusive = integer_indexes.index(self._epoch_end)
integer_indexes = integer_indexes[:(batch_end_inclusive+1)]
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
def enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given array or `DataFrame`. In
the case of a pandas `DataFrame`, the first enqueued `Tensor` corresponds to
the index of the `DataFrame`. For numpy arrays, the first enqueued `Tensor`
contains the row number.
Args:
    data: a numpy `ndarray` or pandas `DataFrame` that will be read into the
queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
Returns:
A queue filled with the rows of the given array or `DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame` or a numpy `ndarray`.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [dtypes.as_dtype(col.dtype)
for col in data.values()]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [dtypes.as_dtype(dt)
for dt in [data.index.dtype] + list(data.dtypes)]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(capacity,
dtypes=types,
shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(get_feed_fn(placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
runner = fqr.FeedingQueueRunner(queue=queue,
enqueue_ops=enqueue_ops,
feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
logging_ops.scalar_summary(summary_name, full)
return queue
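# A minimal usage sketch (hedged): the DataFrame, batch size, and TF 1.x
# Session/QueueRunner calls below are illustrative, not part of this module.
# The first dequeued tensor is the DataFrame index, followed by one tensor per
# column, in column order.
def _enqueue_data_usage_sketch():
  import pandas as pd
  import tensorflow as tf
  df = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0], "b": [10.0, 20.0, 30.0, 40.0]})
  queue = enqueue_data(df, capacity=32, shuffle=True, min_after_dequeue=4)
  index_batch, a_batch, b_batch = queue.dequeue_many(2)
  with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run([index_batch, a_batch, b_batch]))
    coord.request_stop()
    coord.join(threads)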
|
apache-2.0
|
hmedina/KaSaAn
|
KaSaAn/scripts/kappa_snapshot_largest_complex_time.py
|
1
|
5982
|
#! /usr/bin/env python3
"""
Plot the composition of the giant component in time from a set of snapshots located in a directory.
``` {.text}
usage: kappa_snapshot_largest_complex_time
[-h] Show detailed help.
[-d DIRECTORY] Directory where snapshots are stored, default is <.>
[-p PATTERN] Pattern that groups desired snapshots names; default 'snap*.ka'.
[-a [...]] Patterns that should be plotted; omitting this plots the sum formula.
[-o OUTPUT_NAME] The common file name for saving figures; shown if not given.
[-fs WIDTH HEIGHT] Size of the resulting figure, in inches.
[--lin_log] If specified, produce an additional plot with linear X-axis and logarithmic Y-axis.
[--log_lin] If specified, produce an additional plot with logarithmic X-axis and linear Y-axis.
[--log_log] If specified, produce an additional plot with logarithmic X-axis and logarithmic Y-axis.
[--un_stacked] If given, produce regular non-stacked plot.
[--mt THREADS] Launch multiple threads for reading snapshots. Safe, but always less performant: WIP.
```
"""
import argparse
import matplotlib as mpl
import matplotlib.pyplot as plt
from pathlib import Path
from KaSaAn.functions import find_snapshot_names
from KaSaAn.functions.graph_largest_complex_composition import snapshot_list_to_plot_matrix, _make_figure
def main():
"""Plot the evolution of the giant component in time from a set of snapshots located in a directory, showing only
the subset of patterns specified."""
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument('-d', '--directory', type=str, default='.',
help='Name of the directory where snapshots are stored; default is current directory.')
parser.add_argument('-p', '--pattern', type=str, default='snap*.ka',
help='Pattern that should be used to get the snapshot names; default is as produced by KaSim,'
' `snap*.ka`')
parser.add_argument('-a', '--agent-patterns', type=str, default=None, nargs='*',
help='Patterns whose number of symmetry-adjusted embeddings into the giant component'
' should be plotted; leave blank or omit to plot all agent types (i.e. sum formula)'
' instead.')
parser.add_argument('-o', '--output_name', type=str,
help='If specified, the name of the file where the figure should be saved. If not given,'
' figure will be shown instead. If alternate scale options are given, a "_log_lin" or'
' similar will be inserted between the file-name and the extension requested to'
' distinguish the additional requested files.')
parser.add_argument('-fs', '--figure_size', type=float, default=mpl.rcParams['figure.figsize'], nargs=2,
help='Size of the resulting figure, in inches, specified as two elements, width and height'
' (text size is specified in points, so this affects the size of text relative to other'
' graph elements).')
parser.add_argument('--lin_log', action='store_true',
help='If specified, produce an additional plot with linear X-axis and logarithmic Y-axis.')
parser.add_argument('--log_lin', action='store_true',
help='If specified, produce an additional plot with logarithmic X-axis and linear Y-axis.')
parser.add_argument('--log_log', action='store_true',
help='If specified, produce an additional plot with logarithmic X-axis and logarithmic Y-axis.')
parser.add_argument('--un_stacked', action='store_true',
help='If given, produce a conventional plot rather than a filled stacked plot (meant for sum'
' formulae). Useful when plotting patterns that may overlap, ergo whose stacking would not be'
' as intuitive.')
parser.add_argument('-mt', '--multi_thread', type=int, default=1,
help='Number of threads for the concurrent pool of workers to read-in snapshots. Default uses'
' 1, so a single-threaded for-loop.')
args = parser.parse_args()
snap_name_list = find_snapshot_names(target_directory=args.directory, name_pattern=args.pattern)
s_times, p_matrix, pattern_list = snapshot_list_to_plot_matrix(snapshot_names=snap_name_list,
agent_patterns_requested=args.agent_patterns,
thread_number=args.multi_thread)
# scale plot
fig_lin_lin = _make_figure(s_times, p_matrix, pattern_list, args.figure_size, 'linear', 'linear', args.un_stacked)
if args.lin_log:
fig_lin_log = _make_figure(s_times, p_matrix, pattern_list, args.figure_size, 'linear', 'log', args.un_stacked)
if args.log_lin:
fig_log_lin = _make_figure(s_times, p_matrix, pattern_list, args.figure_size, 'log', 'linear', args.un_stacked)
if args.log_log:
fig_log_log = _make_figure(s_times, p_matrix, pattern_list, args.figure_size, 'log', 'log', args.un_stacked)
# save or display?
if args.output_name:
save_path = Path(args.output_name)
fig_lin_lin.savefig(save_path)
if args.lin_log:
fig_lin_log.savefig(save_path.parents[0] / Path(save_path.stem + '_lin_log' + save_path.suffix))
if args.log_lin:
fig_log_lin.savefig(save_path.parents[0] / Path(save_path.stem + '_log_lin' + save_path.suffix))
if args.log_log:
fig_log_log.savefig(save_path.parents[0] / Path(save_path.stem + '_log_log' + save_path.suffix))
else:
plt.show()
if __name__ == '__main__':
main()
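# Example invocation (hedged; directory, Kappa patterns, and file names below
# are illustrative):
#   ./kappa_snapshot_largest_complex_time.py -d ./snapshots -p 'snap*.ka' \
#       -a 'A(b[.])' 'B()' -o largest_complex.png --log_lin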
|
mit
|
dmsuehir/spark-tk
|
integration-tests/tests/test_frame_pandas.py
|
2
|
1901
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setup import tc, rm, get_sandbox_path
from sparktk import dtypes
def test_frame_to_pandas_to_frame(tc):
"""
Tests going from a frame to a pandas df (to_pandas) and then back to a frame (import_pandas)
"""
# Create a frame from a csv file for testing
path = "../datasets/importcsvtest.csv"
frame1 = tc.frame.import_csv(path, header=True, infer_schema=True)
# bring to data frame and check the columns/types/row count
df = frame1.to_pandas()
assert(df.columns.tolist() == ['string_column', 'integer_column', 'float_column', 'datetime_column'])
assert([str(d) for d in df.dtypes] == ['object', 'int32', 'float64', 'datetime64[ns]'])
assert(frame1.count() == len(df))
# import the data frame back to a frame
frame2 = tc.frame.import_pandas(df, frame1.schema, validate_schema=True)
# compare this frame to the original frame
assert(len(frame1.schema) == len(frame2.schema))
for col1, col2 in zip(frame1.schema, frame2.schema):
assert(col1[0] == col2[0])
assert(dtypes.dtypes.get_from_type(col1[1]) == dtypes.dtypes.get_from_type(col2[1]))
assert(frame2.take(frame2.count()) == frame1.take(frame1.count()))
|
apache-2.0
|
GabrielRubin/TC2
|
Client/GameDataExplorer.py
|
1
|
5680
|
from CatanGame import *
import numpy as np
import glob
import cPickle
from tkFileDialog import askopenfilename
import pandas as pd
from sklearn.preprocessing import StandardScaler
class MyDataFile():
def __init__(self, data, target, featureNames):
self.data = data
self.target = target
self.featureNames = featureNames
allGameData = []
def OpenAllSaveData(path):
files = glob.glob("{0}/*.ctndt".format(path))
for f in files:
try:
with open('{0}'.format(f), 'rb') as handle:
allGameData.append(cPickle.load(handle))
except EOFError as exc:
print(exc)
def SaveAllSavedDataAsTXT(path, fileName):
recordStr = ""
currGameIndex = 1
for data in allGameData:
recordStr += "------ Game {0} ------\n".format(currGameIndex)
recordStr += "BOARD = " + data.boardConfig
for turnRecord in data.turnData:
content = "{0}/{1}".format(turnRecord.gameState.turn, type(turnRecord.action).__name__)
recordStr += "{0}\n".format(content)
recordStr += "+++++++++++++++++++++\n"
currGameIndex += 1
with open("{0}/{1}.txt".format(path, fileName), 'wb') as file:
file.write(recordStr)
def SaveAllDataAsCSV(path, fileName):
import csv
with open("{0}/{1}.csv".format(path, fileName), 'w') as csv_file:
headers = GetHeaders(allGameData[0])
examples = GetExamplesSize(allGameData)
fileHeader = [examples,len(headers)] + headers + ['target']
csvWriter = csv.writer(csv_file, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvWriter.writerow(fileHeader)
for data in allGameData:
hexes, numbers = data.GetBoardTerrainAndNumbers()
for turnRecord in data.turnData:
csvWriter.writerow(hexes + numbers +
[data.startingPlayer] +
ComposeTurnCSVLine(turnRecord) +
[ComposeTargetData(data)])
def ComposeTurnCSVLine(turnRecord):
gameState = turnRecord.gameState
rowData = [gameState.turn,
#gameState.currState,
gameState.currPlayer,
gameState.longestRoadPlayer,
gameState.largestArmyPlayer,
sum(gameState.developmentCardsDeck)]
rowData += gameState.boardNodes
rowData += gameState.boardEdges
for player in turnRecord.players:
rowData += ComposePlayerCSVLine(player)
return rowData
def ComposePlayerCSVLine(player):
data = [#player.agentName,
player.seatNumber,
sum(player.resources),
player.knights,
player.victoryPoints]
data += player.numberOfPieces
data += player.developmentCards
return data
def GetHeaders(gameData):
hexes, numbers = gameData.GetBoardTerrainAndNumbers()
h = ["hex{0}".format(n) for n in range(1, len(hexes) + 1)]
numbers = ["n{0}".format(n) for n in range(1, len(numbers) + 1)]
bN = ["bNode{0}".format(n) for n in range(1, len(gameData.turnData[0].gameState.boardNodes) + 1)]
bE = ["bEdge{0}".format(n) for n in range(1, len(gameData.turnData[0].gameState.boardEdges) + 1)]
pData = []
i = 1
for player in gameData.turnData[0].players:
pieces = ["P{0}_piece{1}".format(i, n) for n in range(1, len(player.numberOfPieces) + 1)]
devs = ["P{0}_devC{1}".format(i, n) for n in range(1, len(player.developmentCards) + 1)]
pData += [#"P{0}_agent".format(i),
"P{0}_seat".format(i),
"P{0}_resources".format(i),
"P{0}_knights".format(i),
"P{0}_vp".format(i)] + pieces + devs
i += 1
return h + numbers + ["sPlayer",
"turn", #"state",
"player",
"lgRoads", "lgArmy", "dvCards"] + \
bN + bE + pData
def GetExamplesSize(allData):
i = 0
for data in allData:
i += len(data.turnData)
return i
def ComposeTargetData(gameData):
return gameData.turnData[-1].players[0].victoryPoints
def GetGameTrainingDataFrame(gameData):
headers = GetHeaders(gameData)
n_samples = len(gameData.turnData)
n_features = len(headers)
hexes, numbers = gameData.GetBoardTerrainAndNumbers()
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, turnData in enumerate(gameData.turnData):
entry = hexes + numbers + [gameData.startingPlayer] + ComposeTurnCSVLine(turnData)
tgt = [ComposeTargetData(gameData)]
data[i] = np.asarray(entry, dtype=np.float64)
target[i] = np.asarray(tgt, dtype=np.int)
dataFile = MyDataFile(data=data, target=target, featureNames=headers)
dataFrame = pd.DataFrame(dataFile.data)
dataFrame.columns = dataFile.featureNames
dataFrame['TargetVP'] = dataFile.target
X = dataFrame.drop('TargetVP', axis = 1)
Y = dataFrame['TargetVP']
return X, Y
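# Illustrative use (hedged): X, Y = GetGameTrainingDataFrame(gameData) yields one
# row per recorded turn (board hexes and numbers, starting player, per-player
# state) and, as the target, the final victory points of the player at index 0.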
def GetGameStateDataFrame(gameState, action, boardConfig):
gameData = GameData()
gameData.boardConfig = boardConfig
gameData.AddRecord(action, gameState)
hexes, numbers = gameData.GetBoardTerrainAndNumbers()
entry = hexes + numbers + [gameData.startingPlayer] + ComposeTurnCSVLine(gameData.turnData[0])
data = np.empty((1, len(entry)))
data[0] = np.asarray(entry, dtype=np.float64)
dataFrame = pd.DataFrame(data)
return dataFrame
if __name__ == '__main__':
OpenAllSaveData("GameData")
SaveAllDataAsCSV("GameData", "allCSVData")
|
gpl-3.0
|
majkelx/astwro
|
astwro/starlist/ds9.py
|
1
|
8089
|
from .StarList import StarList
from .file_helpers import *
from .daofiles import parse_dao_hdr, write_dao_header, DAO_file_firstline, DAO
from .file_helpers import as_starlist
import pandas as pd
import re
_ds9_regexp = re.compile(
r'[+-]? *circle[( ] *([+-]?\d+[.]?\d*) *[, ] *([+-]?\d+[.]?\d*).+#.*id *= *(\d+)')
_ds9_wcs_regexp = re.compile(
r'[+-]? *circle[( ] *([+-]?\d+:\d+:\d+[.]?\d*) *[, ] *([+-]?\d+:\d+:\d+[.]?\d*).+#.*id *= *(\d+)')
_ds9_no_id_regexp = re.compile(
r'[+-]? *circle[( ] *([+-]?\d+[.]?\d*) *[, ] *([+-]?\d+[.]?\d*)')
_ds9_no_id_wcs_regexp = re.compile(
r'[+-]? *circle[( ] *([+-]?\d+:\d+:\d+[.]?\d*) *[, ] *([+-]?\d+:\d+:\d+[.]?\d*)')
_ds9_system_wcs = re.compile('fk4|fk5|J2000|B1950|ICRS', re.IGNORECASE)
_ds9_system_xy = re.compile('PHYSICAL|IMAGE', re.IGNORECASE)
def read_ds9_regions(file):
# type: (object) -> StarList
"""
Reads a ds9 region file.
:param file: filename or open input stream
:return: StarList object
Returned object has columns id, x, y, auto_id.
The boolean column auto_id indicates whether the id of an item was read from the
file (#id=xxx comment) or generated by this function.
"""
f, to_close = get_stream(file, 'rt')
# s = StarList.new()
data = []
data_noid = []
dao_hdr1 = None
hdr = None
sys_wcs, sys_xy = (1,2)
system = None
for line in f:
if line[0] == '#':
if line[1:11] == DAO_file_firstline[:10]: # dao header found in comment
dao_hdr1 = line
continue
if dao_hdr1 is not None: # second line of dao header
hdr = parse_dao_hdr(dao_hdr1, line, '#')
else:
if system is None:
if _ds9_system_wcs.search(line):
system = sys_wcs
elif _ds9_system_xy.search(line):
system = sys_xy
pass
m = _ds9_regexp.search(line)
if m is not None: # s[id] = (id, x, y)
data.append([int(m.group(3)), float(m.group(1)), float(m.group(2))])
else:
m = _ds9_wcs_regexp.search(line)
if m is not None: # s[id] = (id, ra, dec)
data.append([int(m.group(3)), str(m.group(1)), str(m.group(2))])
else:
m = _ds9_no_id_regexp.search(line) # s[?] = (x, y)
if m is not None:
data_noid.append([float(m.group(1)), float(m.group(2))])
else:
m = _ds9_no_id_wcs_regexp.search(line) # s[?] = (ra, dec)
if m is not None:
data_noid.append([str(m.group(1)), str(m.group(2))])
dao_hdr1 = None
close_files(to_close)
if system == sys_wcs:
s = StarList(data, columns = ['id', 'ra', 'dec'])
s_noid = StarList(data_noid, columns=['ra', 'dec'])
else:
s = StarList(data, columns = ['id', 'x', 'y'])
s_noid = StarList(data_noid, columns = ['x', 'y'])
s.index = s['id']
s['auto_id'] = False
if not s_noid.empty:
id_starts_from = 1 if s.empty else s.id.max() + 1
ids = range(id_starts_from, id_starts_from + s_noid.stars_number())
s_noid['id'] = ids
s_noid.index = ids
s_noid['auto_id'] = True
if s.empty:
s = s_noid
else:
s = s.append(s_noid)
s.DAO_hdr = hdr
s.DAO_type = DAO.RADEC_FILE if system == sys_wcs else DAO.XY_FILE
return s
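# A minimal reading sketch (hedged): the region text below is illustrative, and
# this assumes Python 3 text strings and that get_stream passes an already-open
# stream through unchanged.
def _read_ds9_regions_sketch():
    import io
    region_text = (
        '# Region file format: DS9 version 4.0\n'
        'image\n'
        'circle(100.0,200.0,8) # text="1" id=1\n'
        'circle(150.5,250.5,8)\n'
    )
    stars = read_ds9_regions(io.StringIO(region_text))
    # The first circle keeps its explicit id; the second one gets an
    # auto-generated id and auto_id == True.
    return stars[['id', 'x', 'y', 'auto_id']]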
def write_ds9_regions(starlist, filename,
color='green', width=1, size=None, font=None, label='{id:.0f}',
exclude=None, indexes=None, colors=None, sizes=None, labels=None,
color_column=None, size_column=None,
comment=None, add_global=None, WCS=False):
"""
Writes ds9 region file.
Some regions can be visually distinguished by providing additional indexes that select regions
with specific attributes.
:param StarList starlist: StarList object to dump
:param str filename: output filename or stream open for writing
:param str color: default color
:param int width: default line width
:param int size: default radius (default 8px or 2")
:param str font: ds9 font specification e.g. "times 12 bold italic"
:param str label: format expression for label, use col names
:param pd.Index exclude: index of disabled regions, if None all are enabled
:param [pd.Index] indexes: additional indexes to include specific color and size attributes
:param [str] colors: specific colors for indexes
:param [int] sizes: specific sizes for indexes
:param [str] labels: specific labels for indexes
:param str color_column: column of starlist with color values
:param str size_column: column of starlist with size values
:param str add_global: content of additional 'global' if not None
:param str comment: content of additional comment line if not None
:param bool or str WCS: If true, columns `ra` and `dec` will be used and coord system set to ICRS
If a non-empty string, the string will be used as the system description
If None, False or '', columns 'x','y' will be used and system set to IMAGE
Example:
write_ds9_regions(sl, 'i.reg', color='blue',
indexes=[saturated, psf],
colors=['yellow', 'red'],
sizes=[12, None],
labels=[None, 'PSF:{id}'],
exclude=faint)
Generates regions file i.reg of blue circles, radius 8,
objects present in index saturated will have larger yellow circles
objects present in index psf will be red and labeled with prefix PSF:
objects present in index faint will be disabled by '-' sign and not displayed by ds9, but can be parsed back
"""
if WCS:
xcol = 'ra'
ycol = 'dec'
starlist = as_starlist(starlist)
else:
xcol = 'x'
ycol = 'y'
starlist = as_starlist(starlist, updateskycoord=False)
try: (starlist[xcol], starlist[ycol])
except KeyError as e:
raise KeyError('No coordinate columns ({},{}) in starlist. Check WCS parameter also'.format(xcol, ycol))
f, to_close = get_stream(filename, 'w')
f.write('# Region file format: DS9 version 4.0\n')
if starlist.DAO_hdr is not None:
write_dao_header(starlist.DAO_hdr, f, '#')
if comment is not None:
f.write('#{}\n'.format(comment))
if color is not None:
f.write('global color={}\n'.format(color))
if width is not None:
f.write('global width={}\n'.format(width))
if font is not None:
f.write('global font={}\n'.format(font))
if add_global is not None:
f.write('global {}\n'.format(add_global))
if not WCS:
f.write('image\n')
else:
system = WCS if isinstance(WCS, str) else 'icrs'
f.write(system+'\n')
for i, row in starlist.iterrows():
if exclude is not None and i in exclude:
f.write('-')
if size is not None:
s = size
else:
s = '2"' if WCS else 8
text = label.format(**row)
c = ''
if size_column is not None:
s = row[size_column]
if color_column is not None:
c = ' color=' + row[color_column]
if indexes is not None:
for n in range(len(indexes)):
if i in indexes[n]:
if sizes and sizes[n] is not None:
s = sizes[n]
if colors and colors[n] is not None:
c = ' color=' + colors[n]
if labels and labels[n] is not None:
text = labels[n].format(**row)
f.write('circle({},{},{}) #{} text="{}" id={:d}\n'.format(row[xcol], row[ycol], s, c, text, i))
close_files(to_close)
|
mit
|
realisticus/data-science-from-scratch
|
code/nearest_neighbors.py
|
57
|
7357
|
from __future__ import division
from collections import Counter
from linear_algebra import distance
from statistics import mean
import math, random
import matplotlib.pyplot as plt
def raw_majority_vote(labels):
votes = Counter(labels)
winner, _ = votes.most_common(1)[0]
return winner
def majority_vote(labels):
"""assumes that labels are ordered from nearest to farthest"""
vote_counts = Counter(labels)
winner, winner_count = vote_counts.most_common(1)[0]
num_winners = len([count
for count in vote_counts.values()
if count == winner_count])
if num_winners == 1:
return winner # unique winner, so return it
else:
return majority_vote(labels[:-1]) # try again without the farthest
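# Worked illustration: majority_vote(['Python', 'R', 'R', 'Python', 'Java'])
# first sees a 2-2 tie between 'Python' and 'R', so it retries without the
# farthest label until the tie breaks: ['Python', 'R', 'R', 'Python'] still
# ties, and ['Python', 'R', 'R'] returns 'R'.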
def knn_classify(k, labeled_points, new_point):
"""each labeled point should be a pair (point, label)"""
# order the labeled points from nearest to farthest
by_distance = sorted(labeled_points,
key=lambda (point, _): distance(point, new_point))
# find the labels for the k closest
k_nearest_labels = [label for _, label in by_distance[:k]]
# and let them vote
return majority_vote(k_nearest_labels)
cities = [(-86.75,33.5666666666667,'Python'),(-88.25,30.6833333333333,'Python'),(-112.016666666667,33.4333333333333,'Java'),(-110.933333333333,32.1166666666667,'Java'),(-92.2333333333333,34.7333333333333,'R'),(-121.95,37.7,'R'),(-118.15,33.8166666666667,'Python'),(-118.233333333333,34.05,'Java'),(-122.316666666667,37.8166666666667,'R'),(-117.6,34.05,'Python'),(-116.533333333333,33.8166666666667,'Python'),(-121.5,38.5166666666667,'R'),(-117.166666666667,32.7333333333333,'R'),(-122.383333333333,37.6166666666667,'R'),(-121.933333333333,37.3666666666667,'R'),(-122.016666666667,36.9833333333333,'Python'),(-104.716666666667,38.8166666666667,'Python'),(-104.866666666667,39.75,'Python'),(-72.65,41.7333333333333,'R'),(-75.6,39.6666666666667,'Python'),(-77.0333333333333,38.85,'Python'),(-80.2666666666667,25.8,'Java'),(-81.3833333333333,28.55,'Java'),(-82.5333333333333,27.9666666666667,'Java'),(-84.4333333333333,33.65,'Python'),(-116.216666666667,43.5666666666667,'Python'),(-87.75,41.7833333333333,'Java'),(-86.2833333333333,39.7333333333333,'Java'),(-93.65,41.5333333333333,'Java'),(-97.4166666666667,37.65,'Java'),(-85.7333333333333,38.1833333333333,'Python'),(-90.25,29.9833333333333,'Java'),(-70.3166666666667,43.65,'R'),(-76.6666666666667,39.1833333333333,'R'),(-71.0333333333333,42.3666666666667,'R'),(-72.5333333333333,42.2,'R'),(-83.0166666666667,42.4166666666667,'Python'),(-84.6,42.7833333333333,'Python'),(-93.2166666666667,44.8833333333333,'Python'),(-90.0833333333333,32.3166666666667,'Java'),(-94.5833333333333,39.1166666666667,'Java'),(-90.3833333333333,38.75,'Python'),(-108.533333333333,45.8,'Python'),(-95.9,41.3,'Python'),(-115.166666666667,36.0833333333333,'Java'),(-71.4333333333333,42.9333333333333,'R'),(-74.1666666666667,40.7,'R'),(-106.616666666667,35.05,'Python'),(-78.7333333333333,42.9333333333333,'R'),(-73.9666666666667,40.7833333333333,'R'),(-80.9333333333333,35.2166666666667,'Python'),(-78.7833333333333,35.8666666666667,'Python'),(-100.75,46.7666666666667,'Java'),(-84.5166666666667,39.15,'Java'),(-81.85,41.4,'Java'),(-82.8833333333333,40,'Java'),(-97.6,35.4,'Python'),(-122.666666666667,45.5333333333333,'Python'),(-75.25,39.8833333333333,'Python'),(-80.2166666666667,40.5,'Python'),(-71.4333333333333,41.7333333333333,'R'),(-81.1166666666667,33.95,'R'),(-96.7333333333333,43.5666666666667,'Python'),(-90,35.05,'R'),(-86.6833333333333,36.1166666666667,'R'),(-97.7,30.3,'Python'),(-96.85,32.85,'Java'),(-95.35,29.9666666666667,'Java'),(-98.4666666666667,29.5333333333333,'Java'),(-111.966666666667,40.7666666666667,'Python'),(-73.15,44.4666666666667,'R'),(-77.3333333333333,37.5,'Python'),(-122.3,47.5333333333333,'Python'),(-89.3333333333333,43.1333333333333,'R'),(-104.816666666667,41.15,'Java')]
cities = [([longitude, latitude], language) for longitude, latitude, language in cities]
def plot_state_borders(plt, color='0.8'):
pass
def plot_cities():
# key is language, value is pair (longitudes, latitudes)
plots = { "Java" : ([], []), "Python" : ([], []), "R" : ([], []) }
# we want each language to have a different marker and color
markers = { "Java" : "o", "Python" : "s", "R" : "^" }
colors = { "Java" : "r", "Python" : "b", "R" : "g" }
for (longitude, latitude), language in cities:
plots[language][0].append(longitude)
plots[language][1].append(latitude)
# create a scatter series for each language
for language, (x, y) in plots.iteritems():
plt.scatter(x, y, color=colors[language], marker=markers[language],
label=language, zorder=10)
plot_state_borders(plt) # assume we have a function that does this
plt.legend(loc=0) # let matplotlib choose the location
plt.axis([-130,-60,20,55]) # set the axes
plt.title("Favorite Programming Languages")
plt.show()
def classify_and_plot_grid(k=1):
plots = { "Java" : ([], []), "Python" : ([], []), "R" : ([], []) }
markers = { "Java" : "o", "Python" : "s", "R" : "^" }
colors = { "Java" : "r", "Python" : "b", "R" : "g" }
for longitude in range(-130, -60):
for latitude in range(20, 55):
predicted_language = knn_classify(k, cities, [longitude, latitude])
plots[predicted_language][0].append(longitude)
plots[predicted_language][1].append(latitude)
# create a scatter series for each language
for language, (x, y) in plots.iteritems():
plt.scatter(x, y, color=colors[language], marker=markers[language],
label=language, zorder=0)
plot_state_borders(plt, color='black') # assume we have a function that does this
plt.legend(loc=0) # let matplotlib choose the location
plt.axis([-130,-60,20,55]) # set the axes
plt.title(str(k) + "-Nearest Neighbor Programming Languages")
plt.show()
#
# the curse of dimensionality
#
def random_point(dim):
return [random.random() for _ in range(dim)]
def random_distances(dim, num_pairs):
return [distance(random_point(dim), random_point(dim))
for _ in range(num_pairs)]
if __name__ == "__main__":
# try several different values for k
for k in [1, 3, 5, 7]:
num_correct = 0
for location, actual_language in cities:
other_cities = [other_city
for other_city in cities
if other_city != (location, actual_language)]
predicted_language = knn_classify(k, other_cities, location)
if predicted_language == actual_language:
num_correct += 1
print k, "neighbor[s]:", num_correct, "correct out of", len(cities)
dimensions = range(1, 101, 5)
avg_distances = []
min_distances = []
random.seed(0)
for dim in dimensions:
distances = random_distances(dim, 10000) # 10,000 random pairs
avg_distances.append(mean(distances)) # track the average
min_distances.append(min(distances)) # track the minimum
print dim, min(distances), mean(distances), min(distances) / mean(distances)
|
unlicense
|
justacec/bokeh
|
bokeh/charts/models.py
|
9
|
8430
|
from __future__ import absolute_import
from six import iteritems
import pandas as pd
from bokeh.models.renderers import GlyphRenderer
from bokeh.models.sources import ColumnDataSource
from bokeh.core.properties import (HasProps, String, Either, Float, Color, Instance, List,
Any, Dict)
from .properties import ColumnLabel, Column
class CompositeGlyph(HasProps):
"""Represents a subset of data.
A collection of heterogeneous or homogeneous glyph
renderers which represent a subset of data. The
purpose of the composite glyph is to abstract
away the details of constructing glyphs, based on
the details of a subset of data, from the grouping
operations that a generalized builder must implement.
In general, the Builder operates at the full column
oriented data source level, segmenting and assigning
attributes from a large selection, while the composite glyphs
will typically be passed array-like structures with
one or more singular attributes to apply.
Another way to explain the concept is that the Builder
operates as the groupby, as in pandas, while the
CompositeGlyph operates as the function used in the apply.
What is the responsibility of the Composite Glyph?
- Produce GlyphRenderers
- Apply any aggregations
- Tag the GlyphRenderers with the group label
- Apply transforms due to chart operations
- Note: Operations require implementation of special methods
"""
# composite glyph inputs
label = Either(String, Dict(String, Any), default='None',
help='Identifies the subset of data.')
values = Either(Column(Float), Column(String), help="""
Array-like values, which are used as the input to the composite glyph.
Most composite glyphs add their own representation of one or more values-like
columns/arrays that they receive as inputs. These are compiled together for
generating `source`, `data`, and `df` by the individual composite glyphs.
""")
# derived from inputs
source = Instance(ColumnDataSource, help="""The data source used for the contained
glyph renderers. Simple glyphs part of the composite glyph might not use the
column data source.""")
renderers = List(Instance(GlyphRenderer))
glyphs = Dict(String, Any) # where we expect a Glyph class as Value
operations = List(Any, help="""A list of chart operations that can be applied to
manipulate their visual depiction.""")
color = Color(default='gray', help="""A high level color. Some glyphs will
implement more specific color attributes for parts or specific glyphs.""")
fill_color = Color(default="gray")
line_color = Color(default='black', help="""A default outline color for contained
glyphs.""")
fill_alpha = Float(default=0.8)
line_alpha = Float(default=1.0)
left_buffer = Float(default=0.0)
right_buffer = Float(default=0.0)
top_buffer = Float(default=0.0)
bottom_buffer = Float(default=0.0)
def __init__(self, **properties):
vals = properties.get('values')
if String().is_valid(vals) or Float().is_valid(vals):
properties['values'] = [vals]
super(CompositeGlyph, self).__init__(**properties)
self.setup()
def setup(self):
"""Build renderers and data source and set sources on renderers."""
self.renderers = [renderer for renderer in self.build_renderers()]
if self.renderers is not None:
self.refresh()
def refresh(self):
"""Update the GlyphRenderers.
.. note:
this method would be called after data is added.
"""
if self.renderers is not None:
data = self.build_source()
if data is not None:
if isinstance(data, dict):
source = ColumnDataSource(data)
if not isinstance(source, ColumnDataSource) and source is not None:
raise TypeError('build_source must return dict or ColumnDataSource.')
else:
self.source = self.add_chart_index(source)
self._set_sources()
@property
def data(self):
if self.source is not None:
return self.source.data
else:
return {}
@property
def df(self):
if self.data:
return pd.DataFrame(self.data)
else:
return pd.DataFrame()
def add_chart_index(self, data):
"""Add identifier of the data group as a column for each row.
Args:
data (dict or `ColumnDataSource`): can be the type of data used internally
to ColumnDataSource, or a ColumnDataSource.
Returns:
dict or `ColumnDataSource`: returns the same type of data provided
"""
if isinstance(data, ColumnDataSource):
source = data
data = source.data
else:
source = None
# add chart index to data
if 'chart_index' not in data and len(list(data.keys())) > 0:
n_rows = len(list(data.values())[0])
# add composite chart index as column
data['chart_index'] = [self.label] * n_rows
# add constant value for each column in chart index
if isinstance(self.label, dict):
for col, val in iteritems(self.label):
data[col] = [val] * n_rows
if source is not None:
source.data = data
return source
else:
return data
def build_renderers(self):
yield GlyphRenderer()
def build_source(self):
data = {}
if self.values is not None:
data = {'values': self.values}
return data
def _set_sources(self):
"""Store reference to source in each GlyphRenderer.
.. note::
if the glyphs that are part of the composite glyph differ, you may have to
override this method and handle the sources manually.
"""
for renderer in self.renderers:
renderer.data_source = self.source
def __stack__(self, glyphs):
"""A special method the `stack` function applies to composite glyphs."""
pass
def __jitter__(self, glyphs):
"""A special method the `jitter` function applies to composite glyphs."""
pass
def __dodge__(self, glyphs):
"""A special method the `dodge` function applies to composite glyphs."""
pass
def __overlay__(self, glyphs):
"""A special method the `overlay` function applies to composite glyphs."""
pass
def apply_operations(self):
pass
@classmethod
def glyph_properties(cls):
props = {}
for name, glyph in iteritems(cls.glyphs):
props[name] = glyph.class_properties(withbases=True)
return props
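# A minimal subclassing sketch (hedged): the Circle glyph and column names are
# illustrative. A concrete composite glyph overrides build_source to describe
# its subset of data and build_renderers to yield the GlyphRenderers drawn from
# that source, e.g.
#   _PointGlyphSketch(label='series A', values=[3.0, 1.0, 2.0]).renderers
class _PointGlyphSketch(CompositeGlyph):
    def build_source(self):
        # one x position per value; 'values' comes from the base class input
        return {'x': list(range(len(self.values))), 'values': self.values}
    def build_renderers(self):
        from bokeh.models.glyphs import Circle
        yield GlyphRenderer(glyph=Circle(x='x', y='values',
                                         fill_color=self.fill_color,
                                         line_color=self.line_color))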
class CollisionModifier(HasProps):
"""Models an special type of operation that alters how glyphs interact.
Used to handle the manipulation of glyphs for operations, such as stacking. The
list of `CompositeGlyph`s can either be input into the `CollisionModifier` as
keyword args, or added individually with the `add_glyph` method.
"""
comp_glyphs = List(Instance(CompositeGlyph), help="""A list of composite glyphs,
to apply the modification to.""")
name = String(help="""The name of the collision modifier.""")
method_name = String(help="""The name of the method that will be utilized on
the composite glyphs. This method must exist on all `comp_glyphs`.""")
columns = Either(ColumnLabel, List(ColumnLabel), help="""Some collision modifiers
might require column labels to apply the operation in relation to.""")
def add_glyph(self, comp_glyph):
self.comp_glyphs.append(comp_glyph)
def apply(self, renderers=None):
if len(self.comp_glyphs) == 0:
self.comp_glyphs = renderers
if len(self.comp_glyphs) > 0:
# the first renderer's operation method is applied to the rest
getattr(self.comp_glyphs[0], self.method_name)(self.comp_glyphs)
else:
raise AttributeError('%s must be applied to available renderers, none found.' %
self.__class__.__name__)
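# Illustrative use (hedged): a stacking operation could be expressed as
#   CollisionModifier(name='stack', method_name='__stack__').apply(comp_glyphs)
# which calls comp_glyphs[0].__stack__(comp_glyphs), letting the first glyph's
# special method reposition the whole group.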
|
bsd-3-clause
|
lin-credible/scikit-learn
|
examples/calibration/plot_calibration.py
|
225
|
4795
|
"""
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
|
bsd-3-clause
|
sdss/marvin
|
tests/utils/test_maskbit.py
|
1
|
7930
|
#!/usr/bin/env python
# encoding: utf-8
#
# test_maskbit.py
#
# @Author: Brett Andrews <andrews>
# @Date: 2017-10-06 10:10:00
# @Last modified by: José Sánchez-Gallego ([email protected])
# @Last modified time: 2018-11-26 12:08:06
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import pytest
from marvin.utils.general.maskbit import Maskbit
bits = [(0, 'BITZERO', 'The zeroth bit.'),
(1, 'BITONE', 'The first bit.'),
(2, 'BITTWO', 'The second bit.'),
(3, 'BITTHREE', 'The third bit'),
(4, 'BITFOUR', 'The fourth bit')]
schema = pd.DataFrame(bits, columns=['bit', 'label', 'description'])
name = 'MYMASK'
description = 'My first Maskbit.'
mask = np.array([[0, 1], [2, 12]])
custom_mask = np.array([[1, 5], [7, 11]])
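# For orientation: with this schema, a mask value of 12 (4 + 8) has bits 2 and 3
# set, i.e. labels 'BITTWO' and 'BITTHREE', while a custom_mask value of 11
# (1 + 2 + 8) sets 'BITZERO', 'BITONE' and 'BITTHREE'.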
class TestMaskbit(object):
def test_maskbit_init_with_schema(self, name=name, schema=schema, description=description):
mb = Maskbit(name=name, schema=schema, description=description)
assert np.all(mb.schema == schema)
assert mb.name == name
assert mb.description == description
assert str(mb) == "<Maskbit 'MYMASK' None>".format(schema)
@pytest.mark.parametrize('name',
['MANGA_TARGET1',
'MANGA_DAPPIXMASK'])
def test_maskbit_init_from_name(self, name):
mb = Maskbit(name=name)
assert mb.name == name
assert isinstance(mb.schema, pd.DataFrame)
assert mb.description is None
def test_values_to_bits_no_value_error(self, name=name, schema=schema, description=description):
mb = Maskbit(name=name, schema=schema, description=description)
with pytest.raises(AssertionError) as ee:
mb.values_to_bits(values=None)
assert 'Must provide values.' in str(ee.value)
@pytest.mark.parametrize('values, expected',
[(None, [[[], [0]], [[1], [2, 3]]]),
(0, []),
(3, [0, 1]),
(np.array([1, 3]), [[0], [0, 1]]),
(np.array([[0, 2], [1, 5]]), [[[], [1]], [[0], [0, 2]]])])
def test_values_to_bits(self, values, expected):
mb = Maskbit(name=name, schema=schema, description=description)
if values is None:
mb.mask = mask
actual = mb.values_to_bits(values=values)
assert actual == expected
@pytest.mark.parametrize('value, expected',
[(0, []),
(3, [0, 1])])
def test_value_to_bits(self, value, expected):
mb = Maskbit(name=name, schema=schema, description=description)
actual = mb._value_to_bits(value, schema.bit.values)
assert actual == expected
@pytest.mark.parametrize('values, expected',
[(None, [[[], ['BITZERO']], [['BITONE'], ['BITTWO', 'BITTHREE']]]),
(0, []),
(3, ['BITZERO', 'BITONE']),
(np.array([1, 3]), [['BITZERO'], ['BITZERO', 'BITONE']]),
(np.array([[0, 2], [1, 3]]), [[[], ['BITONE']], [['BITZERO'], ['BITZERO', 'BITONE']]])])
def test_values_to_labels(self, values, expected):
mb = Maskbit(name=name, schema=schema, description=description)
if values is None:
mb.mask = mask
actual = mb.values_to_labels(values=values)
assert actual == expected
@pytest.mark.parametrize('bits_in, expected',
[([], []),
([0], ['BITZERO']),
([1], ['BITONE']),
([[1]], [['BITONE']]),
([[0, 1], [2]], [['BITZERO', 'BITONE'], ['BITTWO']])])
def test_bits_to_labels(self, bits_in, expected):
mb = Maskbit(name=name, schema=schema, description=description)
actual = mb._bits_to_labels(bits_in)
assert actual == expected
@pytest.mark.parametrize('labels, expected',
[('BITONE', 2),
(['BITONE'], 2),
(['BITONE', 'BITTWO'], 6)])
def test_labels_to_value(self, labels, expected):
mb = Maskbit(name=name, schema=schema, description=description)
actual = mb.labels_to_value(labels)
assert actual == expected
@pytest.mark.parametrize('labels, expected',
[('BITONE', [1]),
(['BITONE'], [1]),
(['BITONE', 'BITTWO'], [1, 2])])
def test_labels_to_bits(self, labels, expected):
mb = Maskbit(name=name, schema=schema, description=description)
actual = mb.labels_to_bits(labels)
assert actual == expected
@pytest.mark.parametrize('labels, expected',
[('BITONE', np.array([[0, 0], [2, 0]])),
(['BITONE'], np.array([[0, 0], [2, 0]])),
(['BITTWO', 'BITTHREE'], np.array([[0, 0], [0, 12]]))])
def test_get_mask_int(self, labels, expected):
mb = Maskbit(name=name, schema=schema, description=description)
mb.mask = mask
actual = mb.get_mask(labels)
assert (actual == expected).all()
@pytest.mark.parametrize('labels, expected',
[('BITONE', np.array([[False, False], [True, False]])),
(['BITONE'], np.array([[False, False], [True, False]])),
(['BITTWO', 'BITTHREE'], np.array([[False, False], [False, True]]))])
def test_get_mask_bool(self, labels, expected):
mb = Maskbit(name=name, schema=schema, description=description)
mb.mask = mask
actual = mb.get_mask(labels, dtype=bool)
assert (actual == expected).all()
@pytest.mark.parametrize('labels, expected',
[('BITONE', np.array([[0, 0], [2, 2]])),
(['BITONE'], np.array([[0, 0], [2, 2]])),
(['BITONE', 'BITTHREE'], np.array([[0, 0], [2, 10]]))])
def test_get_mask_custom_mask_int(self, labels, expected):
mb = Maskbit(name=name, schema=schema, description=description)
mb.mask = mask
actual = mb.get_mask(labels, mask=custom_mask)
assert (actual == expected).all()
@pytest.mark.parametrize('labels, expected',
[('BITONE', np.array([[False, False], [True, True]])),
(['BITONE'], np.array([[False, False], [True, True]])),
(['BITONE', 'BITTHREE'], np.array([[False, False], [True, True]]))])
def test_get_mask_custom_mask_bool(self, labels, expected):
mb = Maskbit(name=name, schema=schema, description=description)
mb.mask = mask
actual = mb.get_mask(labels, mask=custom_mask, dtype=bool)
assert (actual == expected).all()
@pytest.mark.parametrize('labels', ['BITFAKE', ['BITFAKE', 'BITTHREE']])
def test_get_mask_nonpresent_label(self, labels):
mb = Maskbit(name=name, schema=schema, description=description)
with pytest.raises(ValueError) as ee:
mb.get_mask(labels)
assert 'label \'BITFAKE\' not found in the maskbit schema.' in str(ee)
@pytest.mark.parametrize('labels, dtype, expected',
[('BITFOUR', bool, np.array([[False, False], [False, False]])),
('BITFOUR', int, np.array([[0, 0], [0, 0]]))])
def test_get_mask_empty(self, labels, dtype, expected):
mb = Maskbit(name=name, schema=schema, description=description)
mb.mask = mask
actual = mb.get_mask(labels, mask=custom_mask, dtype=dtype)
assert (actual == expected).all()
|
bsd-3-clause
|
mikehulluk/morphforge
|
src/morphforge/morphology/__init__.py
|
1
|
2370
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
""" A package for handling neuron morphology models
This package provides an object model for representing neuronal morphologies,
as well as tools for import/export from files; construction of morphologies
within python; traversal of the datastructures; rendering to MayaVI &
matplotlib and export to triangular mesh.
Internally, morphforge provides a dual representation for morphologies, either
**TreeBased:** (:py:class:`~.core.tree.MorphologyTree`) or **VertexBased:**
(:py:class:`~.core.array.MorphologyArray`). More information about these can be
found at XX.
.. todo::
<LINK>
"""
from morphforge.morphology.core import MorphologyTree
from morphforge.morphology.core import MorphologyArray
# Ensure that the plugins get added dynamically:
import morphforge.morphology.importer
import morphforge.morphology.exporter
|
bsd-2-clause
|
bkendzior/scipy
|
scipy/special/c_misc/struve_convergence.py
|
23
|
3678
|
"""
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
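# Illustrative scale: err_metric(np.array([1.0 + 1e-13]), np.array([1.0])) is
# about 1e-13, i.e. below the 1e-12 "close to mpmath" threshold that the -12
# contour level below encodes on a log10 scale.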
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
|
bsd-3-clause
|
jpinedaf/pyspeckit
|
pyspeckit/spectrum/widgets.py
|
4
|
15585
|
from __future__ import print_function
from six.moves import xrange
from matplotlib.widgets import Widget,Button,Slider
import matplotlib
import warnings
class dictlist(list):
def __init__(self, *args):
list.__init__(self, *args)
self._dict = {}
self._dict_index = {}
for ii,value in enumerate(self):
if len(value) == 2:
self._dict[value[0]] = value[1]
self._dict_index[value[0]] = ii
self._dict_index[ii] = value[0]
else:
self._dict[ii] = value
self._dict_index[ii] = ii
def __getitem__(self, key):
if type(key) is int:
return super(dictlist,self).__getitem__(key)
else:
return self._dict[key]
def __setitem__(self, key, value):
if type(key) is int:
super(dictlist,self).__setitem__(key,value)
self._dict[self._dict_index[key]] = value
else:
if key in self._dict:
self._dict[key] = value
self[self._dict_index[key]] = value
else:
self._dict[key] = value
self._dict_index[key] = len(self)
self._dict_index[len(self)] = key
self.append(value)
def __slice__(self, s1, s2):
pass
def values(self):
return [self._dict[self._dict_index[ii]] for ii in xrange(len(self))]
def keys(self):
return [self._dict_index[ii] for ii in xrange(len(self))]
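# Worked illustration: after d = dictlist() and d['amp'] = 1.0, both d['amp']
# and d[0] return 1.0 (new keys are appended to the list and mirrored in the
# internal dict), which is how FitterSliders.set_sliders stores each slider by
# parameter name below while still being able to address the newest one as
# self.sliders[-1].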
class ModifiableSlider(Slider):
def set_valmin(self, valmin):
"""
Change the minimum value of the slider
"""
self.valmin = valmin
self.ax.set_xlim((self.valmin,self.valmax))
if self.val < self.valmin:
self.set_val(self.valmin)
if self.valinit < self.valmin:
self.valinit = (self.valmax-self.valmin)/2. + self.valmin
if self.vline in self.ax.lines:
self.ax.lines.remove(self.vline)
self.vline = self.ax.axvline(self.valinit,0,1, color='r', lw=1)
def set_valmax(self, valmax):
"""
Change the maximum value of the slider
"""
self.valmax = valmax
self.ax.set_xlim((self.valmin,self.valmax))
if self.val > self.valmax:
self.set_val(self.valmax)
if self.valinit > self.valmax:
self.valinit = (self.valmax-self.valmin)/2. + self.valmin
if self.vline in self.ax.lines:
self.ax.lines.remove(self.vline)
self.vline = self.ax.axvline(self.valinit,0,1, color='r', lw=1)
class FitterSliders(Widget):
"""
A tool to interactively adjust the parameters of a fit via sliders attached to a :class:`matplotlib.figure.Figure`
"""
def __init__(self, specfit, targetfig, npars=1, toolfig=None, parlimitdict={}):
"""
*targetfig*
The figure instance to adjust
*toolfig*
The figure instance to embed the subplot tool into. If
None, a default figure will be created. If you are using
this from the GUI
"""
self.targetfig = targetfig
self.specfit = specfit
self.parlimitdict = parlimitdict
from matplotlib import pyplot
if toolfig is None:
tbar = matplotlib.rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
matplotlib.rcParams['toolbar'] = 'None'
self.toolfig = pyplot.figure(figsize=(6,3))
if hasattr(targetfig.canvas.manager,'window'):
if hasattr(targetfig.canvas.manager.window, 'title'):
self.toolfig.canvas.set_window_title("Fit Sliders for "+targetfig.canvas.manager.window.title())
elif hasattr(targetfig.canvas.manager.window, 'windowTitle'):
self.toolfig.canvas.set_window_title("Fit Sliders for "+targetfig.canvas.manager.window.windowTitle())
else:
warnings.warn("Only Qt4 and TkAgg support window titles (apparently)")
self.toolfig.subplots_adjust(top=0.9,left=0.2,right=0.9)
matplotlib.rcParams['toolbar'] = tbar
else:
self.toolfig = toolfig
self.toolfig.subplots_adjust(left=0.2, right=0.9)
bax = self.toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
self.set_sliders(parlimitdict)
def reset(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in self.sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in self.sliders:
slider.reset()
# reset drawon
for slider, b in zip(self.sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
self.toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = self.toolfig.subplotpars.validate
self.toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(reset)
self.toolfig.subplotpars.validate = validate
def clear_sliders(self):
"""
Get rid of the sliders...
"""
try:
for sl in self.sliders:
sl.ax.remove()
except NotImplementedError:
for sl in self.sliders:
self.specfit.Spectrum.plotter.figure.delaxes(sl.ax)
self.specfit.Spectrum.plotter.refresh()
def set_sliders(self, parlimitdict={}):
"""
Set the slider properties, actions, and values
can also reset their limits
"""
def update(value):
mpp = [slider.val for slider in self.sliders]
for line in self.specfit.modelplot:
line.set_ydata(self.specfit.get_model_frompars(line.get_xdata(),mpp))
# update components too
for ii,line in enumerate(self.specfit._plotted_components):
xdata = line.get_xdata()
modelcomponents = self.specfit.fitter.components(xdata,
mpp,
**self.specfit._component_kwargs)
for jj,data in enumerate(modelcomponents):
if ii % 2 == jj:
# can have multidimensional components
if len(data.shape) > 1:
for d in (data):
line.set_ydata(d)
else:
line.set_ydata(data)
self.specfit.Spectrum.plotter.refresh()
self.sliders = dictlist()
npars = len(self.specfit.parinfo)
for param in self.specfit.parinfo:
name = param['parname']
value = param['value']
limited = param['limited']
limits = param['limits']
# make one less subplot so that there's room for buttons
# param['n'] is zero-indexed, subplots are 1-indexed
ax = self.toolfig.add_subplot(npars+1,1,param['n']+1)
ax.set_navigate(False)
if name in parlimitdict:
limits = parlimitdict[name]
limited = [True,True]
if limited[0]:
vmin = limits[0]
elif value != 0:
vmin = min([value/4.0,value*4.0])
else:
vmin = -1
if limited[1]:
vmax = limits[1]
elif value != 0:
vmax = max([value/4.0,value*4.0])
else:
vmax = 1
try:
self.sliders[name] = ModifiableSlider(ax,
name, vmin, vmax, valinit=value)
except ValueError:
self.sliders[name] = ModifiableSlider(ax,
name, vmin.value, vmax.value, valinit=value)
self.sliders[-1].on_changed(update)
def get_values(self):
return [s.val for s in self.sliders]
class FitterTools(Widget):
"""
A tool to monitor and play with :class:`pyspeckit.spectrum.fitter` properties
--------------------------
| Baseline range [x,x] |
| Baseline order - |
| (Baseline subtracted) |
| |
| Fitter range [x,x] |
| Fitter type ------- |
| Fitter Guesses [p,w] |
| ... ... |
| ... ... |
| |
| (Fit) (BL fit) (reset) |
--------------------------
"""
def __init__(self, specfit, targetfig, toolfig=None, nsubplots=12):
"""
*targetfig*
The figure instance to adjust
*toolfig*
The figure instance to embed the subplot tool into. If
None, a default figure will be created. If you are using
this from the GUI
"""
self.targetfig = targetfig
self.specfit = specfit
self.baseline = specfit.Spectrum.baseline
self.plotter = specfit.Spectrum.plotter
from matplotlib import pyplot
if toolfig is None:
tbar = matplotlib.rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
matplotlib.rcParams['toolbar'] = 'None'
self.toolfig = pyplot.figure(figsize=(6,3))
self.toolfig.canvas.set_window_title("Fit Tools for "+targetfig.canvas.manager.window.title())
self.toolfig.subplots_adjust(top=0.9,left=0.05,right=0.95)
matplotlib.rcParams['toolbar'] = tbar
else:
self.toolfig = toolfig
self.toolfig.subplots_adjust(left=0.0, right=1.0)
#bax = self.toolfig.add_axes([0.6, 0.05, 0.15, 0.075])
#self.buttonrefresh = Button(bax, 'Refresh')
# buttons ruin everything.
# fax = self.toolfig.add_axes([0.1, 0.05, 0.15, 0.075])
# self.buttonfit = Button(fax, 'Fit')
#
# resetax = self.toolfig.add_axes([0.7, 0.05, 0.15, 0.075])
# self.buttonreset = Button(resetax, 'Reset')
# resetblax = self.toolfig.add_axes([0.3, 0.05, 0.15, 0.075])
# self.buttonresetbl = Button(resetblax, 'Reset BL')
# resetfitax = self.toolfig.add_axes([0.5, 0.05, 0.15, 0.075])
# self.buttonresetfit = Button(resetfitax, 'Reset fit')
def refresh(event):
thisdrawon = self.drawon
self.drawon = False
self.update_information()
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
self.toolfig.canvas.draw()
self.targetfig.canvas.draw()
def fit(event):
self.specfit.button3action(event)
def reset_fit(event):
self.specfit.guesses = []
self.specfit.npeaks = 0
self.specfit.includemask[:] = True
self.refresh(event)
def reset_baseline(event):
self.baseline.unsubtract()
self.refresh(event)
def reset(event):
reset_baseline(event)
reset_fit(event)
self.plotter()
self.refresh(event)
        # during refresh there can be a temporary invalid state
        # depending on the order of the refresh operations, so we turn off
        # validation while refreshing
#validate = self.toolfig.subplotpars.validate
#self.toolfig.subplotpars.validate = False
#self.buttonrefresh.on_clicked(refresh)
#self.toolfig.subplotpars.validate = validate
# these break everything.
# self.buttonfit.on_clicked(fit)
# self.buttonresetfit.on_clicked(reset_fit)
# self.buttonresetbl.on_clicked(reset_baseline)
# self.buttonreset.on_clicked(reset)
#menuitems = []
#for label in ('polynomial','blackbody','log-poly'):
# def on_select(item):
# print 'you selected', item.labelstr
# item = MenuItem(fig, label, props=props, hoverprops=hoverprops,
# on_select=on_select)
# menuitems.append(item)
#menu = Menu(fig, menuitems)
self.axes = [self.toolfig.add_subplot(nsubplots,1,spnum, frame_on=False, navigate=False, xticks=[], yticks=[])
                for spnum in range(1, nsubplots+1)]
#self.axes = self.toolfig.add_axes([0,0,1,1])
self.use_axes = [0,1,2,4,5,6,7,8,9,10,11]
self.labels = dict([(axnum,None) for axnum in self.use_axes])
self.update_information()
self.targetfig.canvas.mpl_connect('button_press_event',self.refresh)
self.targetfig.canvas.mpl_connect('key_press_event',self.refresh)
self.targetfig.canvas.mpl_connect('draw_event',self.refresh)
def refresh(self, event):
try:
thisdrawon = self.drawon
self.drawon = False
self.update_information()
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
self.toolfig.canvas.draw()
except:
# ALWAYS fail silently
# this is TERRIBLE coding practice, but I have no idea how to tell the object to disconnect
# when the figure is closed
pass
def update_information(self, **kwargs):
self.information = [
("Baseline Range","(%g,%g)" % (self.baseline.xmin,self.baseline.xmax)),
("Baseline Order","%i" % (self.baseline.order)),
("Baseline Subtracted?","%s" % (self.baseline.subtracted)),
("Fitter Range","(%g,%g)" % (self.specfit.xmin,self.specfit.xmax)),
("Fitter Type","%s" % (self.specfit.fittype)),
]
        for ii in range(self.specfit.npeaks):
guesses = tuple(self.specfit.guesses[ii:ii+3])
if len(guesses) == 3:
self.information += [("Fitter guesses%i:" % ii , "p: %g c: %g w: %g" % guesses) ]
else:
break
self.show_labels(**kwargs)
def show_selected_region(self):
self.specfit.highlight_fitregion()
def show_label(self, axis, text, xloc=0.0, yloc=0.5, **kwargs):
return axis.text(xloc, yloc, text, **kwargs)
def show_value(self, axis, text, xloc=0.5, yloc=0.5, **kwargs):
return axis.text(xloc, yloc, text, **kwargs)
def show_labels(self, **kwargs):
for axnum,(label,text) in zip(self.use_axes, self.information):
if self.labels[axnum] is not None and len(self.labels[axnum]) == 2:
labelobject,textobject = self.labels[axnum]
labelobject.set_label(label)
textobject.set_text(text)
else:
self.labels[axnum] = (self.show_label(self.axes[axnum],label),
self.show_value(self.axes[axnum],text))
def update_info_texts(self):
for newtext,textobject in zip(self.information.values(), self.info_texts):
textobject.set_text(newtext)
#import parinfo
#
#class ParameterButton(parinfo.Parinfo):
# """
# A class to manipulate individual parameter values
# """
# def __init__(self,
|
mit
|
Djabbz/scikit-learn
|
sklearn/utils/random.py
|
234
|
10510
|
# Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
    >>> #This is equivalent to np.random.permutation(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
            raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
            found = np.zeros(shape, dtype=int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
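# A brief usage sketch for the backport above (output values are illustrative
# only, hence the skipped doctests):
#
#   >>> from sklearn.utils.random import choice
#   >>> choice(5, 3, replace=False, random_state=0)            # doctest: +SKIP
#   array([2, 0, 1])
#   >>> choice(['pooh', 'rabbit', 'piglet'], 2, p=[0.6, 0.2, 0.2],
#   ...        random_state=0)                                  # doctest: +SKIP
#   array(['pooh', 'piglet'], dtype='|S6')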
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
        if not np.allclose(np.sum(class_prob_j), 1.0):
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
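# A minimal usage sketch (illustrative only, not part of the library tests):
# draw 5 samples for two output columns, the first uniform over {0, 1, 2} and
# the second heavily weighted towards class 0.
#
#   >>> from sklearn.utils.random import random_choice_csc     # doctest: +SKIP
#   >>> mat = random_choice_csc(n_samples=5,
#   ...                         classes=[np.array([0, 1, 2]),
#   ...                                  np.array([0, 1])],
#   ...                         class_probability=[[0.2, 0.3, 0.5],
#   ...                                            [0.9, 0.1]],
#   ...                         random_state=0)
#   >>> mat.shape                                               # doctest: +SKIP
#   (5, 2)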
|
bsd-3-clause
|
abimannans/scikit-learn
|
sklearn/tests/test_cross_validation.py
|
29
|
46740
|
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
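# A quick illustration of how the mock behaves (not one of the original tests):
# cross-validating MockClassifier(a=2) returns the constant per-fold score
# 1 / (1 + |a|), independently of the data, e.g. with the X and y defined below
#
#   cval.cross_val_score(MockClassifier(a=2), X, y)  # -> array([1/3, 1/3, 1/3])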
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
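# For instance (an illustrative check, not one of the original tests), a 3-fold
# split of 6 samples should yield 3 disjoint train/test splits whose test parts
# jointly cover every index:
#
#   check_cv_coverage(cval.KFold(6, 3), expected_n_iter=3, n_samples=6)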
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
    # side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
    # The digits samples are dependent: they are apparently grouped by author,
    # although we don't have any information on the group segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffled
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regard to the writing style of the authors
    # by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test that StratifiedShuffleSplit indices are drawn with
    # equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1./3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
        mask_train = np.zeros(len(y), dtype=bool)
        mask_test = np.zeros(len(y), dtype=bool)
        mask_train[train] = 1
        mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
|
bsd-3-clause
|
sandeepgupta2k4/tensorflow
|
tensorflow/tools/dist_test/python/census_widendeep.py
|
54
|
11900
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
    categorical_columns: Names of the categorical columns
    continuous_columns: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
      continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
    else:
      urllib.request.urlretrieve(train_data_url, train_file_path)
      train_file = open(train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
    else:
      urllib.request.urlretrieve(test_data_url, test_file_path)
      test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
      df: pandas DataFrame containing the feature columns and labels
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
# Merges the two dictionaries into one.
    feature_cols = dict(list(continuous_cols.items()) + list(categorical_cols.items()))
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
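# Illustrative sketch (not part of the original script; values are assumed, for
# illustration only): for a two-row frame with continuous column "age" and
# categorical column "gender", CensusDataSource._input_fn above would produce
# structures roughly like
#   feature_cols["age"]    -> tf.constant([39, 50])
#   feature_cols["gender"] -> tf.SparseTensor(indices=[[0, 0], [1, 0]],
#                                             values=["male", "female"],
#                                             dense_shape=[2, 1])
#   label                  -> tf.constant([0, 1])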
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the cesnsus data"
)
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
nextstrain/augur
|
augur/clades.py
|
1
|
7796
|
"""
Assign clades to nodes in a tree based on amino-acid or nucleotide signatures.
"""
import sys
from Bio import Phylo
import pandas as pd
import numpy as np
from collections import defaultdict
from .utils import get_parent_name_by_child_name_for_tree, read_node_data, write_json, get_json_name
def read_in_clade_definitions(clade_file):
'''
    Reads in a tab-separated file that defines clades by amino-acid or nucleotide mutations
Format
------
clade gene site alt
Clade_1 ctpE 81 D
Clade_2 nuc 30642 T
Clade_3 nuc 444296 A
Clade_4 pks8 634 T
Parameters
----------
    clade_file : str
        path to the clade definition file (tab- or comma-separated)
Returns
-------
dict
clade definitions as :code:`{clade_name:[(gene, site, allele),...]}`
'''
clades = defaultdict(list)
df = pd.read_csv(clade_file, sep='\t' if clade_file.endswith('.tsv') else ',')
for index, row in df.iterrows():
allele = (row.gene, row.site-1, row.alt)
clades[row.clade].append(allele)
clades.default_factory = None
return clades
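# Example (illustrative only, not executed): with the tab-separated definitions
# shown in the docstring above, read_in_clade_definitions returns approximately
#   {'Clade_1': [('ctpE', 80, 'D')],
#    'Clade_2': [('nuc', 30641, 'T')],
#    'Clade_3': [('nuc', 444295, 'A')],
#    'Clade_4': [('pks8', 633, 'T')]}
# (sites are converted to 0-based indexing by the `row.site-1` above).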
def is_node_in_clade(clade_alleles, node, ref):
'''
Determines whether a node matches the clade definition based on sequence
    For each condition, this first looks for the state among the mutations stored in node.sequences,
    then checks whether a reference sequence is available, and otherwise reports a non-match
Parameters
----------
clade_alleles : list
list of clade defining alleles
node : Phylo.Node
node to check, assuming sequences (as mutations) are attached to node
ref : str/list
        reference sequences (indexed by gene) used to look up states at positions without mutations
Returns
-------
bool
True if in clade
'''
conditions = []
for gene, pos, clade_state in clade_alleles:
if gene in node.sequences and pos in node.sequences[gene]:
state = node.sequences[gene][pos]
elif ref and gene in ref:
state = ref[gene][pos]
else:
state = ''
conditions.append(state==clade_state)
return all(conditions)
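# Sketch of the matching logic above (illustrative values only): with
# clade_alleles = [('nuc', 100, 'T')], a node whose node.sequences['nuc'][100]
# is 'T' matches that condition; a node with no mutation recorded at that
# position matches only if a reference is supplied and ref['nuc'][100] == 'T';
# otherwise the state defaults to '' and the condition fails.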
def assign_clades(clade_designations, all_muts, tree, ref=None):
'''
Ensures all nodes have an entry (or auspice doesn't display nicely), tests each node
to see if it's the first member of a clade (assigns 'clade_annotation'), and sets
    all nodes' clade_membership to the value of their parent. This will change if a node is later found to be
the first member of a clade.
Parameters
----------
clade_designations : dict
clade definitions as :code:`{clade_name:[(gene, site, allele),...]}`
all_muts : dict
mutations in each node
tree : Phylo.Tree
phylogenetic tree to process
ref : str/list, optional
reference sequence to look up state when not mutated
Returns
-------
dict
mapping of node to clades
'''
clade_membership = {}
parents = get_parent_name_by_child_name_for_tree(tree)
    # first pass to set all nodes to unassigned as a precaution to ensure the attribute is set
for node in tree.find_clades(order = 'preorder'):
clade_membership[node.name] = {'clade_membership': 'unassigned'}
# count leaves
for node in tree.find_clades(order = 'postorder'):
node.leaf_count = 1 if node.is_terminal() else np.sum([c.leaf_count for c in node])
for node in tree.get_nonterminals():
for c in node:
c.up=node
tree.root.up = None
tree.root.sequences = {'nuc':{}}
tree.root.sequences.update({gene:{} for gene in all_muts[tree.root.name]['aa_muts']})
# attach sequences to all nodes
for node in tree.find_clades(order='preorder'):
if node.up:
node.sequences = {gene:muts.copy() for gene, muts in node.up.sequences.items()}
for mut in all_muts[node.name]['muts']:
a, pos, d = mut[0], int(mut[1:-1])-1, mut[-1]
node.sequences['nuc'][pos] = d
if 'aa_muts' in all_muts[node.name]:
for gene in all_muts[node.name]['aa_muts']:
for mut in all_muts[node.name]['aa_muts'][gene]:
a, pos, d = mut[0], int(mut[1:-1])-1, mut[-1]
if gene not in node.sequences:
node.sequences[gene]={}
node.sequences[gene][pos] = d
# second pass to assign 'clade_annotation' to basal nodes within each clade
# if multiple nodes match, assign annotation to largest
# otherwise occasional unwanted cousin nodes get assigned the annotation
for clade_name, clade_alleles in clade_designations.items():
node_counts = []
for node in tree.find_clades(order = 'preorder'):
if is_node_in_clade(clade_alleles, node, ref):
node_counts.append(node)
sorted_nodes = sorted(node_counts, key=lambda x: x.leaf_count, reverse=True)
if len(sorted_nodes) > 0:
target_node = sorted_nodes[0]
clade_membership[target_node.name] = {'clade_annotation': clade_name, 'clade_membership': clade_name}
# third pass to propagate 'clade_membership'
# don't propagate if encountering 'clade_annotation'
for node in tree.find_clades(order = 'preorder'):
for child in node:
if 'clade_annotation' not in clade_membership[child.name]:
clade_membership[child.name]['clade_membership'] = clade_membership[node.name]['clade_membership']
return clade_membership
def get_reference_sequence_from_root_node(all_muts, root_name):
# attach sequences to root
ref = {}
try:
ref['nuc'] = list(all_muts[root_name]["sequence"])
except:
print("WARNING in augur.clades: nucleotide mutation json does not contain full sequences for the root node.")
if "aa_muts" in all_muts[root_name]:
try:
ref.update({gene:list(seq) for gene, seq in all_muts[root_name]["aa_sequences"].items()})
except:
print("WARNING in augur.clades: amino acid mutation json does not contain full sequences for the root node.")
return ref
def register_arguments(parser):
parser.add_argument('--tree', help="prebuilt Newick -- no tree will be built if provided")
parser.add_argument('--mutations', nargs='+', help='JSON(s) containing ancestral and tip nucleotide and/or amino-acid mutations ')
parser.add_argument('--reference', nargs='+', help='fasta files containing reference and tip nucleotide and/or amino-acid sequences ')
parser.add_argument('--clades', type=str, help='TSV file containing clade definitions by amino-acid')
parser.add_argument('--output-node-data', type=str, help='name of JSON file to save clade assignments to')
def run(args):
## read tree and data, if reading data fails, return with error code
tree = Phylo.read(args.tree, 'newick')
node_data = read_node_data(args.mutations, args.tree)
if node_data is None:
print("ERROR: could not read node data (incl sequences)")
return 1
all_muts = node_data['nodes']
if args.reference:
# PLACE HOLDER FOR vcf WORKFLOW.
# Works without a reference for now but can be added if clade defs contain positions
# that are monomorphic across reference and sequence sample.
ref = None
else:
# extract reference sequences from the root node entry in the mutation json
# if this doesn't exist, it will complain but not error.
ref = get_reference_sequence_from_root_node(all_muts, tree.root.name)
clade_designations = read_in_clade_definitions(args.clades)
clade_membership = assign_clades(clade_designations, all_muts, tree, ref)
out_name = get_json_name(args)
write_json({'nodes': clade_membership}, out_name)
print("clades written to", out_name, file=sys.stdout)
|
agpl-3.0
|
tosolveit/scikit-learn
|
examples/manifold/plot_mds.py
|
261
|
2616
|
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float64)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
|
bsd-3-clause
|
lkuchenb/shogun
|
examples/undocumented/python_modular/graphical/so_multiclass_BMRM.py
|
10
|
2835
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from modshogun import RealFeatures
from modshogun import MulticlassModel, MulticlassSOLabels, RealNumber, DualLibQPBMSOSVM
from modshogun import BMRM, PPBMRM, P3BMRM
from modshogun import StructuredAccuracy
def fill_data(cnt, minv, maxv):
x1 = np.linspace(minv, maxv, cnt)
a, b = np.meshgrid(x1, x1)
X = np.array((np.ravel(a), np.ravel(b)))
y = np.zeros((1, cnt*cnt))
tmp = cnt*cnt;
y[0, tmp/3:(tmp/3)*2]=1
y[0, tmp/3*2:(tmp/3)*3]=2
return X, y.flatten()
def gen_data():
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(N, dim), covs[0]) + np.array([0, 10]),
np.dot(np.random.randn(N, dim), covs[1]) + np.array([-10, -10]),
np.dot(np.random.randn(N, dim), covs[2]) + np.array([10, -10])];
Y = np.hstack((np.zeros(N), np.ones(N), 2*np.ones(N)))
return X, Y
def get_so_labels(out):
N = out.get_num_labels()
l = np.zeros(N)
for i in xrange(N):
l[i] = RealNumber.obtain_from_generic(out.get_label(i)).value
return l
# Number of classes
M = 3
# Number of samples of each class
N = 1000
# Dimension of the data
dim = 2
X, y = gen_data()
cnt = 250
X2, y2 = fill_data(cnt, np.min(X), np.max(X))
labels = MulticlassSOLabels(y)
features = RealFeatures(X.T)
model = MulticlassModel(features, labels)
lambda_ = 1e1
sosvm = DualLibQPBMSOSVM(model, labels, lambda_)
sosvm.set_cleanAfter(10) # number of iterations a cutting plane has to be inactive for before it is removed
sosvm.set_cleanICP(True) # enables inactive cutting plane removal feature
sosvm.set_TolRel(0.001) # set relative tolerance
sosvm.set_verbose(True) # enables verbosity of the solver
sosvm.set_cp_models(16) # set number of cutting plane models
sosvm.set_solver(BMRM) # select training algorithm
#sosvm.set_solver(PPBMRM)
#sosvm.set_solver(P3BMRM)
sosvm.train()
res = sosvm.get_result()
Fps = np.array(res.get_hist_Fp_vector())
Fds = np.array(res.get_hist_Fd_vector())
wdists = np.array(res.get_hist_wdist_vector())
plt.figure()
plt.subplot(221)
plt.title('Fp and Fd history')
plt.plot(xrange(res.get_n_iters()), Fps, hold=True)
plt.plot(xrange(res.get_n_iters()), Fds, hold=True)
plt.subplot(222)
plt.title('w dist history')
plt.plot(xrange(res.get_n_iters()), wdists)
# Evaluation
out = sosvm.apply()
Evaluation = StructuredAccuracy()
acc = Evaluation.evaluate(out, labels)
print "Correct classification rate: %0.4f%%" % ( 100.0*acc )
# show figure
Z = get_so_labels(sosvm.apply(RealFeatures(X2)))
x = (X2[0,:]).reshape(cnt, cnt)
y = (X2[1,:]).reshape(cnt, cnt)
z = Z.reshape(cnt, cnt)
plt.subplot(223)
plt.pcolor(x, y, z)
plt.contour(x, y, z, linewidths=1, colors='black', hold=True)
plt.plot(X[:,0], X[:,1], 'yo')
plt.axis('tight')
plt.title('Classification')
plt.show()
|
gpl-3.0
|
jlegendary/scikit-learn
|
examples/ensemble/plot_forest_importances_faces.py
|
403
|
1519
|
"""
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
|
bsd-3-clause
|
joernhees/scikit-learn
|
examples/svm/plot_svm_nonlinear.py
|
62
|
1119
|
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired,
edgecolors='k')
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
bsd-3-clause
|
njwilson23/scipy
|
doc/source/conf.py
|
40
|
10928
|
# -*- coding: utf-8 -*-
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.1":
raise RuntimeError("Sphinx 1.1 or newer required")
needs_sphinx = '1.1'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.autosummary', 'scipyoptdoc']
# Determine if the matplotlib has a recent enough version of the
# plot_directive.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
raise RuntimeError("You need a recent enough version of matplotlib")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SciPy'
copyright = '2008-2014, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
import scipy
version = re.sub(r'\.dev-.*$', r'.dev', scipy.__version__)
release = scipy.__version__
print "Scipy (VERSION %s)" % (version,)
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if os.path.isdir(themedir):
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("http://scipy.org/", "Scipy.org"),
("http://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': 'indexsidebar.html'}
else:
# Build without scipy.org sphinx theme present
if 'scipyorg' in tags:
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init & update")
else:
html_style = 'scipy_fallback.css'
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': 'indexsidebar.html'}
html_title = "%s v%s Reference Guide" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_additional_pages = {}
html_use_modindex = True
html_copy_source = False
html_file_suffix = '.html'
htmlhelp_basename = 'scipy'
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the SciPy community'
latex_documents = [
('index', 'scipy-ref.tex', 'SciPy Reference Guide', _stdauthor, 'manual'),
# ('user/index', 'scipy-user.tex', 'SciPy User Guide',
# _stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters etc. sections, align uniformly, and adjust label emphasis
\usepackage{expdlist}
\let\latexdescription=\description
\let\endlatexdescription=\enddescription
\renewenvironment{description}%
{\begin{latexdescription}[\setleftmargin{60pt}\breaklabel\setlabelstyle{\bfseries\itshape}]}%
{\end{latexdescription}}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\normalfont\bfseries\itshape}%
{\py@NormalColor}{0em}{\py@NormalColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Save vertical space in parameter lists and elsewhere
\makeatletter
\renewenvironment{quote}%
{\list{}{\topsep=0pt%
\parsep \z@ \@plus\p@}%
\item\relax}%
{\endlist}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'http://docs.python.org/dev': None,
'http://docs.scipy.org/doc/numpy': None,
}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
if sphinx.__version__ >= "0.7":
import glob
autosummary_generate = glob.glob("*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96), 'pdf']
plot_html_show_formats = False
import math
phi = (math.sqrt(5) + 1)/2
font_size = 13*72/96.0 # 13 px
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
if not use_matplotlib_plot_directive:
import matplotlib
matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print "NOTE: linkcode extension not found -- no links to source generated"
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(scipy.__file__))
if 'dev' in scipy.__version__:
return "http://github.com/scipy/scipy/blob/master/scipy/%s%s" % (
fn, linespec)
else:
return "http://github.com/scipy/scipy/blob/v%s/scipy/%s%s" % (
scipy.__version__, fn, linespec)
|
bsd-3-clause
|
benanne/morb
|
examples/example_gaussian_learntprecision.py
|
1
|
5501
|
import morb
from morb import rbms, stats, updaters, trainers, monitors
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
plt.ion()
from utils import generate_data, get_context
# DEBUGGING
from theano import ProfileMode
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# load data
print ">> Loading dataset..."
f = gzip.open('datasets/mnist.pkl.gz','rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
train_set_x, train_set_y = train_set
valid_set_x, valid_set_y = valid_set
test_set_x, test_set_y = test_set
# TODO DEBUG
train_set_x = train_set_x[:10000]
valid_set_x = valid_set_x[:1000]
n_visible = train_set_x.shape[1]
n_hidden = 100 # 500
# n_hidden_mean = 100
# n_hidden_precision = 100
mb_size = 20
k = 1 # 15
learning_rate = 0.01 # 0.1
epochs = 2000
print ">> Constructing RBM..."
rbm = rbms.LearntPrecisionGaussianBinaryRBM(n_visible, n_hidden)
# rbm = rbms.LearntPrecisionSeparateGaussianBinaryRBM(n_visible, n_hidden_mean, n_hidden_precision)
initial_vmap = { rbm.v: T.matrix('v') }
# try to calculate weight updates using CD stats
print ">> Constructing contrastive divergence updaters..."
s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h], k=k)
# s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.hp, rbm.hm], k=k)
# We create an updater for each parameter variable.
# IMPORTANT: the precision parameters must be constrained to be negative.
variables = [rbm.Wm.var, rbm.bvm.var, rbm.bh.var, rbm.Wp.var, rbm.bvp.var]
# variables = [rbm.Wm.var, rbm.bvm.var, rbm.bhm.var, rbm.Wp.var, rbm.bvp.var, rbm.bhp.var]
precision_variables = [rbm.Wp.var, rbm.bvp.var]
umap = {}
for var in variables:
    pu = var + (learning_rate/mb_size) * updaters.CDUpdater(rbm, var, s) # scale the CD update by learning_rate / mb_size
if var in precision_variables:
pu = updaters.BoundUpdater(pu, bound=0, type='upper')
umap[var] = pu
print ">> Compiling functions..."
t = trainers.MinibatchTrainer(rbm, umap)
m = monitors.reconstruction_mse(s, rbm.v)
m_data = s['data'][rbm.v]
m_model = s['model'][rbm.v]
e_data = rbm.energy(s['data']).mean()
e_model = rbm.energy(s['model']).mean()
# train = t.compile_function(initial_vmap, mb_size=32, monitors=[m], name='train', mode=mode)
train = t.compile_function(initial_vmap, mb_size=mb_size, monitors=[m, e_data, e_model], name='train', mode=mode)
evaluate = t.compile_function(initial_vmap, mb_size=mb_size, monitors=[m, m_data, m_model, e_data, e_model], name='evaluate', train=False, mode=mode)
def plot_data(d):
plt.figure(5)
plt.clf()
plt.imshow(d.reshape((28,28)), interpolation='gaussian')
plt.draw()
def sample_evolution(start, ns=100): # start = start data
sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)
data = start
plot_data(data)
while True:
for k in range(ns):
for x in sample({ rbm.v: data }): # draw a new sample
data = x[0]
plot_data(data)
# TRAINING
print ">> Training for %d epochs..." % epochs
mses_train_so_far = []
mses_valid_so_far = []
edata_train_so_far = []
emodel_train_so_far = []
edata_so_far = []
emodel_so_far = []
for epoch in range(epochs):
monitoring_data_train = [(cost, energy_data, energy_model) for cost, energy_data, energy_model in train({ rbm.v: train_set_x })]
mses_train, edata_train_list, emodel_train_list = zip(*monitoring_data_train)
mse_train = np.mean(mses_train)
edata_train = np.mean(edata_train_list)
emodel_train = np.mean(emodel_train_list)
monitoring_data = [(cost, data, model, energy_data, energy_model) for cost, data, model, energy_data, energy_model in evaluate({ rbm.v: valid_set_x })]
mses_valid, vdata, vmodel, edata, emodel = zip(*monitoring_data)
mse_valid = np.mean(mses_valid)
edata_valid = np.mean(edata)
emodel_valid = np.mean(emodel)
# plotting
mses_train_so_far.append(mse_train)
mses_valid_so_far.append(mse_valid)
edata_so_far.append(edata_valid)
emodel_so_far.append(emodel_valid)
edata_train_so_far.append(edata_train)
emodel_train_so_far.append(emodel_train)
plt.figure(1)
plt.clf()
plt.plot(mses_train_so_far, label='train')
plt.plot(mses_valid_so_far, label='validation')
plt.title("MSE")
plt.legend()
plt.draw()
plt.figure(4)
plt.clf()
plt.plot(edata_so_far, label='validation / data')
plt.plot(emodel_so_far, label='validation / model')
plt.plot(edata_train_so_far, label='train / data')
plt.plot(emodel_train_so_far, label='train / model')
plt.title("energy")
plt.legend()
plt.draw()
# plot some samples
plt.figure(2)
plt.clf()
plt.imshow(vdata[0][0].reshape((28, 28)), vmin=0, vmax=1)
plt.colorbar()
plt.draw()
plt.figure(3)
plt.clf()
plt.imshow(vmodel[0][0].reshape((28, 28)), vmin=0, vmax=1)
plt.colorbar()
plt.draw()
print "Epoch %d" % epoch
print "training set: MSE = %.6f, data energy = %.2f, model energy = %.2f" % (mse_train, edata_train, emodel_train)
print "validation set: MSE = %.6f, data energy = %.2f, model energy = %.2f" % (mse_valid, edata_valid, emodel_valid)
|
gpl-3.0
|
crafts/crafts-core
|
crafts/predictor/regression.py
|
1
|
2170
|
from crafts.predictor import Predictor
from sklearn.linear_model import LinearRegression
import numpy as np
from datetime import timedelta
from datetime import datetime
class RegressionPredictor(Predictor):
WIDTH = 300
@staticmethod
def set_params(*params):
RegressionPredictor.WIDTH = params[0]
@staticmethod
def get_params():
return [RegressionPredictor.WIDTH]
@staticmethod
def param_spec():
return [slice(150, 950, 150)]
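    # Note (added for clarity; interpretation assumed): param_spec() appears to
    # enumerate candidate WIDTH values for a parameter sweep; slice(150, 950, 150)
    # would correspond to widths 150, 300, ..., 900 seconds.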
def _get_values(self, window, target):
new_window = []
for timestamp, value in window:
if abs(timestamp - target) % 604800 <= RegressionPredictor.WIDTH:
new_window.append((timestamp, value))
return new_window
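# Note on _get_values (added for clarity, assuming Unix-second timestamps):
# 604800 is the number of seconds in one week, so the window keeps only the
# samples whose offset from the target is within RegressionPredictor.WIDTH
# seconds of a whole number of weeks, i.e. the same weekly phase.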
class LinearRegressionPredictor(RegressionPredictor):
def predict(self, window, start_time, cycle_size, interval):
predictions = []
for offset in xrange(0, cycle_size + 1, interval):
target = start_time + offset
training = self._get_values(window, target)
times, values = zip(*training)
lr = LinearRegression()
lr.fit(np.array(times)[:,np.newaxis], values)
            prediction = lr.predict(np.array([[target]]))
predictions.append((target, prediction[0]))
return predictions
class ThielSenPredictor(RegressionPredictor):
def predict(self, window, start_time, cycle_size, interval):
predictions = []
for offset in xrange(0, cycle_size + 1, interval):
target = start_time + offset
training = self._get_values(window, target)
slopes = []
for ida, (time_a, value_a) in enumerate(training):
for idb, (time_b, value_b) in enumerate(training):
                    if time_b == time_a:
                        continue  # skip identical timestamps to avoid division by zero
                    slopes.append((value_b - value_a) / (time_b - time_a))
slope = sorted(slopes)[len(slopes) / 2]
lines = []
for time, value in training:
lines.append(value - time * slope)
intercept = sorted(lines)[len(lines) / 2]
predictions.append((target, target * slope + intercept))
return predictions
|
mit
|
kseetharam/genPolaron
|
datagen_qdynamics_sph_massRat_higherCut.py
|
1
|
7007
|
import numpy as np
import pandas as pd
import xarray as xr
import Grid
import pf_dynamic_sph
import os
import sys
from timeit import default_timer as timer
from copy import copy
if __name__ == "__main__":
start = timer()
# ---- INITIALIZE GRIDS ----
higherCutoff = True; cutoffRat = 1.5
betterResolution = False; resRat = 0.5
(Lx, Ly, Lz) = (60, 60, 60)
(dx, dy, dz) = (0.25, 0.25, 0.25)
# (Lx, Ly, Lz) = (40, 40, 40)
# (dx, dy, dz) = (0.25, 0.25, 0.25)
# (Lx, Ly, Lz) = (21, 21, 21)
# (dx, dy, dz) = (0.375, 0.375, 0.375)
xgrid = Grid.Grid('CARTESIAN_3D')
xgrid.initArray('x', -Lx, Lx, dx); xgrid.initArray('y', -Ly, Ly, dy); xgrid.initArray('z', -Lz, Lz, dz)
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
NGridPoints_desired = (1 + 2 * Lx / dx) * (1 + 2 * Lz / dz)
Ntheta = 50
Nk = np.ceil(NGridPoints_desired / Ntheta)
theta_max = np.pi
thetaArray, dtheta = np.linspace(0, theta_max, Ntheta, retstep=True)
# k_max = np.sqrt((np.pi / dx)**2 + (np.pi / dy)**2 + (np.pi / dz)**2)
k_max = ((2 * np.pi / dx)**3 / (4 * np.pi / 3))**(1 / 3)
k_min = 1e-5
kArray, dk = np.linspace(k_min, k_max, Nk, retstep=True)
if dk < k_min:
print('k ARRAY GENERATION ERROR')
kgrid = Grid.Grid("SPHERICAL_2D")
if higherCutoff is True and betterResolution is False:
kgrid.initArray('k', k_min, cutoffRat * k_max, dk)
k_max = kgrid.getArray('k')[-1]
elif higherCutoff is False and betterResolution is True:
kgrid.initArray('k', k_min, k_max, resRat * dk)
dk = kgrid.getArray('k')[1] - kgrid.getArray('k')[0]
else:
kgrid.initArray_premade('k', kArray)
kgrid.initArray_premade('th', thetaArray)
# for realdyn evolution
tMax = 100
dt = 0.2
CoarseGrainRate = 50
tgrid = np.arange(0, tMax + dt, dt)
gParams = [xgrid, kgrid, tgrid]
NGridPoints = kgrid.size()
print('Total time steps: {0}'.format(tgrid.size))
print('UV cutoff: {0}'.format(k_max))
print('dk: {0}'.format(dk))
print('NGridPoints: {0}'.format(NGridPoints))
print(NGridPoints_cart, NGridPoints)
# Toggle parameters
toggleDict = {'Location': 'cluster', 'Dynamics': 'real', 'Coupling': 'twophonon', 'Grid': 'spherical', 'Longtime': 'false', 'CoarseGrainRate': CoarseGrainRate}
# ---- SET PARAMS ----
mB = 1
n0 = 1
gBB = (4 * np.pi / mB) * 0.05 # Dresher uses aBB ~ 0.2 instead of 0.5 here
# gBB = (4 * np.pi / mB) * 0.02 # Dresher uses aBB ~ 0.2 instead of 0.5 here
nu = np.sqrt(n0 * gBB / mB)
aBB = (mB / (4 * np.pi)) * gBB
xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
print(k_max * xi)
print(5 * mB * xi**2)
print(-3.0 / xi)
print((n0 * aBB * 3)**(-1 / 2) * mB * xi**2)
Params_List = []
mI_Vals = np.array([0.5, 1.0, 2, 5.0])
aIBi_Vals = np.array([-10.0, -5.0, -2.0])
if higherCutoff is True or betterResolution is True:
mI_Vals = np.array([1.0, 5.0])
aIBi_Vals = np.array([-2.0, -1.0])
P_Vals_norm = np.concatenate((np.linspace(0.1, 0.8, 5, endpoint=False), np.linspace(0.8, 1.2, 10, endpoint=False), np.linspace(1.2, 3.0, 12, endpoint=False), np.linspace(3.0, 5.0, 3)))
for mI in mI_Vals:
P_Vals = mI * nu * P_Vals_norm
for aIBi in aIBi_Vals:
for P in P_Vals:
sParams = [mI, mB, n0, gBB]
cParams = [P, aIBi]
if toggleDict['Location'] == 'home':
datapath = '/home/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart)
elif toggleDict['Location'] == 'work':
datapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart)
elif toggleDict['Location'] == 'cluster':
datapath = '/n/scratchlfs/demler_lab/kis/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart)
if higherCutoff is True:
datapath = datapath + '_cutoffRat_{:.2f}'.format(cutoffRat)
if betterResolution is True:
datapath = datapath + '_resRat_{:.2f}'.format(resRat)
gridpath = copy(datapath)
datapath = datapath + '/massRatio={:.1f}'.format(mI / mB)
if toggleDict['Dynamics'] == 'real':
innerdatapath = datapath + '/redyn'
elif toggleDict['Dynamics'] == 'imaginary':
innerdatapath = datapath + '/imdyn'
if toggleDict['Grid'] == 'cartesian':
innerdatapath = innerdatapath + '_cart'
elif toggleDict['Grid'] == 'spherical':
innerdatapath = innerdatapath + '_spherical'
if toggleDict['Coupling'] == 'frohlich':
innerdatapath = innerdatapath + '_froh'
elif toggleDict['Coupling'] == 'twophonon':
innerdatapath = innerdatapath
Params_List.append([sParams, cParams, innerdatapath])
# if os.path.isdir(gridpath) is False:
# os.mkdir(gridpath)
# if os.path.isdir(datapath) is False:
# os.mkdir(datapath)
# if os.path.isdir(innerdatapath) is False:
# os.mkdir(innerdatapath)
print(len(Params_List))
# # ---- COMPUTE DATA ON COMPUTER ----
# runstart = timer()
# for ind, Params in enumerate(Params_List):
# loopstart = timer()
# [sParams, cParams, innerdatapath] = Params_List[ind]
# [mI, mB, n0, gBB] = sParams
# [P, aIBi] = cParams
# dyncart_ds = pf_dynamic_cart.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
# dyncart_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
# loopend = timer()
# print('Index: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(ind, P, aIBi, loopend - loopstart))
# end = timer()
# print('Total Time: {:.2f}'.format(end - runstart))
# ---- COMPUTE DATA ON CLUSTER ----
runstart = timer()
taskCount = int(os.getenv('SLURM_ARRAY_TASK_COUNT'))
taskID = int(os.getenv('SLURM_ARRAY_TASK_ID'))
# taskCount = len(Params_List)
# taskID = 72
if(taskCount > len(Params_List)):
print('ERROR: TASK COUNT MISMATCH')
P = float('nan')
aIBi = float('nan')
sys.exit()
else:
[sParams, cParams, innerdatapath] = Params_List[taskID]
[mI, mB, n0, gBB] = sParams
[P, aIBi] = cParams
dynsph_ds = pf_dynamic_sph.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
dynsph_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
end = timer()
print('Task ID: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(taskID, P, aIBi, end - runstart))
|
mit
|
linkmax91/bitquant
|
web/home/ipython/examples/bitcoin-wavelets.py
|
1
|
1616
|
# coding: utf-8
# In[ ]:
from BitcoinAverager import TimeUtil, BitcoinAverager, PriceCompositor, Forex, BitcoinDataLoader
# In[ ]:
all_exchanges = ['bitfinexUSD','bitstampUSD','itbitUSD',
'itbitEUR','krakenEUR','itbitSGD','anxhkHKD',
'okcoinCNY', 'btcnCNY']
compositor = PriceCompositor(all_exchanges)
compositor.reload()
# In[ ]:
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pytz
hkg_time = pytz.timezone("Asia/Hong_Kong")
start_time = hkg_time.localize(datetime(2015,1,1,6,0,0))
period = relativedelta(minutes=1)
intervals = 60 * 24*90
compositor = PriceCompositor(['bitfinexUSD'], base_currency='USD')
data = compositor.composite_table(start_time, period, intervals)
data
# In[ ]:
averagers = {}
exchanges = ["anxhkHKD", "bitfinexUSD", "bitstampUSD", "btceUSD", "itbitEUR", "itbitSGD", "itbitUSD", "krakenEUR", "krakenUSD", "okcoinCNY", "btcnCNY"]
for e in exchanges:
averagers[e] = BitcoinAverager(e)
averager = averagers["bitfinexUSD"]
# In[ ]:
data['price'].tolist()
# In[ ]:
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
sig = data['price'].tolist()
widths = pow(2,np.arange(0, 18, 0.5))
cwtmatr = signal.cwt(sig, signal.ricker, widths)
imgplot = plt.imshow(cwtmatr, aspect='auto')
imgplot
# In[ ]:
pow(2,np.arange(0, 16, 0.5))
# In[ ]:
from scipy import signal
import matplotlib.pyplot as plt
sig = data['volume'].tolist()
widths = pow(2,np.arange(0, 18, 0.5))
cwtmatr = signal.cwt(sig, signal.ricker, widths)
imgplot = plt.imshow(cwtmatr, aspect='auto')
imgplot
# In[ ]:
|
apache-2.0
|
fzalkow/scikit-learn
|
examples/classification/plot_lda.py
|
164
|
2224
|
"""
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
|
bsd-3-clause
|
guoxiaolongzte/spark
|
python/pyspark/sql/dataframe.py
|
4
|
90501
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
else:
from itertools import imap as map
import warnings
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.serializers import ArrowCollectSerializer, BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import IntegralType
from pyspark.sql.types import *
from pyspark.util import _exception_message
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(object):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the data frame, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
# Check whether _repr_html is supported or not, we use it to avoid calling _jdf twice
# by __repr__ and _repr_html_ while eager evaluation opened.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(2.0)
def createTempView(self, name):
"""Creates a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@since(2.1)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this DataFrame.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
@since(2.2)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Evolving.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
if extended:
print(self._jdf.queryExecution().toString())
else:
print(self._jdf.queryExecution().simpleString())
@since(2.4)
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns true if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Evolving
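For example, a batch DataFrame such as ``df`` reports ``False``:
>>> df.isStreaming
False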
"""
return self._jdf.isStreaming()
@since(1.3)
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: If set to ``True``, truncate strings longer than 20 characters by default.
If set to a number greater than one, truncate long strings to length ``truncate``
and right-align cells.
:param vertical: If set to True, print output rows vertically (one line
per column value).
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
print(self._jdf.showString(n, int(truncate), vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a dataframe with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
import cgi
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: cgi.escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: cgi.escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
@since(2.1)
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this DataFrame, which is especially useful in iterative algorithms where the
plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with L{SparkContext.setCheckpointDir()}.
:param eager: Whether to checkpoint this DataFrame immediately
.. note:: Experimental
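A minimal sketch (illustrative only; the checkpoint directory is a placeholder):
>>> sc.setCheckpointDir("/tmp/checkpoints")  # doctest: +SKIP
>>> df.checkpoint()  # doctest: +SKIP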
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.3)
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this DataFrame, which is especially useful in iterative
algorithms where the plan may grow exponentially. Local checkpoints are stored in the
executors using the caching subsystem and therefore they are not reliable.
:param eager: Whether to checkpoint this DataFrame immediately
.. note:: Experimental
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.1)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
:param eventTime: the name of the column that contains the event time of the row.
:param delayThreshold: the minimum delay to wait for late data to arrive, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
.. note:: Evolving
>>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
@since(2.2)
def hint(self, name, *parameters):
"""Specifies some hint on the current DataFrame.
:param name: A name of the hint.
:param parameters: Optional parameters.
:return: :class:`DataFrame`
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (basestring, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@ignore_unicode_prefix
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
@ignore_unicode_prefix
@since(2.0)
def toLocalIterator(self):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this DataFrame.
>>> list(df.toLocalIterator())
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator()
return _load_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
@ignore_unicode_prefix
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name=u'Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
return self.limit(num).collect()
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
@since(1.3)
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified, it defaults to C{MEMORY_AND_DISK}.
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
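A minimal sketch of persisting with an explicit storage level (illustrative only):
>>> from pyspark import StorageLevel
>>> df.persist(StorageLevel.DISK_ONLY)  # doctest: +SKIP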
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
@since(2.1)
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
@since(1.3)
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. note:: `blocking` default has changed to False to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
@since(1.4)
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
:param numPartitions: int, to specify the target number of partitions
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since("2.4.0")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is range partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
Note that due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
raise ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
@since(1.3)
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
@since(1.3)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default False).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
.. note:: `fraction` is required, and `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
4
>>> df.sample(fraction=0.5, seed=3).count()
4
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = long(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new DataFrame that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 5|
| 1| 9|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
"""
if isinstance(col, basestring):
col = Column(col)
elif not isinstance(col, Column):
raise ValueError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, long, basestring)):
raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the DataFrame. Weights will
be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
1
>>> splits[1].count()
3
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@since(2.3)
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
:param colName: string, column name specified as a regex.
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, basestring):
raise ValueError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the DataFrame.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
@ignore_unicode_prefix
@since(2.1)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
:param other: Right side of the join
:param on: a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
:param how: str, default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``full_outer``, ``left``, ``left_outer``, ``right``, ``right_outer``,
``left_semi``, and ``left_anti``.
The following performs a full outer join between ``df1`` and ``df2``.
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()
[Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name=u'Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name=u'Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], basestring):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, basestring), "how should be basestring"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
@since(1.6)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
@since("1.3.1")
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
Use summary for expanded statistics and control over which statistics to compute.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since("2.3.0")
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
- arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See also describe for basic statistics.
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default None. Number of rows to return; if None, only the first row is returned.
:return: If n is None, return a single :class:`Row`; otherwise, return a list of at most
``n`` rows (note that ``head(1)`` still returns a list, as the examples below show).
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
@ignore_unicode_prefix
@since(1.3)
def first(self):
"""Returns the first row as a :class:`Row`.
>>> df.first()
Row(age=2, name=u'Alice')
"""
return self.head()
@ignore_unicode_prefix
@since(1.3)
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name=u'Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name=u'Bob')]
"""
if isinstance(item, basestring):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
@since(1.3)
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame.
>>> df.select('*').collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.select('name', 'age').collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
"""
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.4)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.4)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.3)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy.agg()``).
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
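A small sketch of the position-based resolution (illustrative only):
>>> df1 = spark.createDataFrame([(1, 2)], ["a", "b"])
>>> df2 = spark.createDataFrame([(3, 4)], ["b", "a"])
>>> df1.union(df2).collect()  # doctest: +SKIP
[Row(a=1, b=2), Row(a=3, b=4)]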
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return self.union(other)
@since(2.3)
def unionByName(self, other):
""" Returns a new :class:`DataFrame` containing union of rows in this and another frame.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
"""
return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL.
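A minimal sketch (illustrative only):
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("c", 4)], ["C1", "C2"])
>>> df1.intersect(df2).collect()  # doctest: +SKIP
[Row(C1=u'a', C2=1)]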
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
@since(2.4)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this dataframe and other
dataframe while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL.
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this frame
but not in another frame.
This is equivalent to `EXCEPT DISTINCT` in SQL.
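A minimal sketch (illustrative only):
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 2)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1)], ["C1", "C2"])
>>> df1.subtract(df2).collect()  # doctest: +SKIP
[Row(C1=u'b', C2=2)]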
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
be, and the system will limit the state accordingly. In addition, data older than the
watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
| 10| 80|Alice|
+---+------+-----+
>>> df.dropDuplicates(['name', 'height']).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
+---+------+-----+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
If specified, drop rows that have fewer than `thresh` non-null values.
This overwrites the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, long, float, string, bool or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, long, float, boolean, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, long, basestring, bool, dict)):
raise ValueError("value should be a float, int, long, string, bool or dict")
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, (int, long)):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
@since(1.4)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values `to_replace` and `value` must have the same type and can only be numerics, booleans,
or strings. `value` can also be None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements, all values to be replaced should have a unique
floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`),
an arbitrary replacement will be used.
:param to_replace: bool, int, long, float, string, list or dict.
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
:param value: bool, int, long, float, string, list or None.
The replacement value must be a bool, int, long, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(basestring)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(basestring)
all_of_numeric = all_of((float, int, long))
# Validate input types
valid_types = (bool, float, int, long, basestring, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise ValueError(
"to_replace should be a bool, float, int, long, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise ValueError("If to_replace is not a dict, value should be "
"a bool, float, int, long, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, basestring))):
raise ValueError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, long, basestring)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, long, basestring)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, basestring):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
DataFrame.
The result of this algorithm has the following deterministic bound:
If the DataFrame has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the DataFrame so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
present in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
:param col: str, list.
Can be a single column name, or a list of names for multiple columns.
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
.. versionchanged:: 2.2
Added support for multiple columns.
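A minimal sketch (illustrative only; exact values depend on the data and the error bound):
>>> df.approxQuantile("age", [0.25, 0.5, 0.75], 0.0)  # doctest: +SKIP
>>> df.approxQuantile(["age"], [0.5], 0.1)  # doctest: +SKIP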
"""
if not isinstance(col, (basestring, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, basestring)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, basestring):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
@since(1.4)
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a DataFrame as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
:param col1: The name of the first column
:param col2: The name of the second column
:param method: The correlation method. Currently only supports "pearson"
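For example, a column is perfectly correlated with itself (illustrative only):
>>> df.corr("age", "age")  # doctest: +SKIP
1.0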
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
@since(1.4)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
:param col1: The name of the first column
:param col2: The name of the second column
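A minimal sketch (illustrative only):
>>> df.cov("age", "age")  # doctest: +SKIP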
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
:param col1: The name of the first column. Distinct items will make the first item of
each row.
:param col2: The name of the second column. Distinct items will make the column names
of the DataFrame.
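A minimal sketch (illustrative only):
>>> df.crosstab("name", "age").show()  # doctest: +SKIP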
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
@since(1.4)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
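A minimal sketch (illustrative only; items below the support threshold may be omitted):
>>> df.freqItems(["age"], support=0.5).collect()  # doctest: +SKIP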
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this DataFrame; attempting to add
a column from some other dataframe will raise an error.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if the schema doesn't contain the given column name.
:param existing: string, name of the existing column to rename.
:param new: string, new name of the column.
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if the schema doesn't contain the given column name(s).
:param cols: a string name of the column to drop, or a
:class:`Column` to drop, or a list of string name of the columns to drop.
>>> df.drop('age').collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.drop(df.age).collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name=u'Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name=u'Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, basestring):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, basestring):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
def toDF(self, *cols):
"""Returns a new class:`DataFrame` that with new specified column names
:param cols: list of new column names (string)
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def toPandas(self):
"""
Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
This is only available if Pandas is installed and available.
.. note:: This method should only be used if the resulting Pandas's DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental.
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
if self.sql_ctx._conf.pandasRespectSessionTimeZone():
timezone = self.sql_ctx._conf.sessionLocalTimeZone()
else:
timezone = None
if self.sql_ctx._conf.arrowEnabled():
use_arrow = True
try:
from pyspark.sql.types import to_arrow_schema
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
to_arrow_schema(self.schema)
except Exception as e:
if self.sql_ctx._conf.arrowFallbackEnabled():
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.fallback.enabled' is set to "
"true." % _exception_message(e))
warnings.warn(msg)
use_arrow = False
else:
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true, but has reached "
"the error below and will not continue because automatic fallback "
"with 'spark.sql.execution.arrow.fallback.enabled' has been set to "
"false.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
# Try to use Arrow optimization when the schema is supported and the required version
# of PyArrow is found, if 'spark.sql.execution.arrow.enabled' is enabled.
if use_arrow:
try:
from pyspark.sql.types import _check_dataframe_convert_date, \
_check_dataframe_localize_timestamps
import pyarrow
batches = self._collectAsArrow()
if len(batches) > 0:
table = pyarrow.Table.from_batches(batches)
pdf = table.to_pandas()
pdf = _check_dataframe_convert_date(pdf, self.schema)
return _check_dataframe_localize_timestamps(pdf, timezone)
else:
return pd.DataFrame.from_records([], columns=self.columns)
except Exception as e:
# We might have to allow fallback here as well but multiple Spark jobs can
# be executed. So, simply fail in this case for now.
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true, but has reached "
"the error below and can not continue. Note that "
"'spark.sql.execution.arrow.fallback.enabled' does not have an effect "
"on failures in the middle of computation.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
# Below is toPandas without Arrow optimization.
pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
dtype = {}
for field in self.schema:
pandas_type = _to_corrected_pandas_type(field.dataType)
# SPARK-21766: if an integer field is nullable and has null values, it can be
# inferred by pandas as a float column. If we then converted the column with NaN
# back to an integer type, e.g. np.int16, we would hit an exception. So we use the
# inferred float type, not the corrected type from the schema, in this case.
if pandas_type is not None and \
not(isinstance(field.dataType, IntegralType) and field.nullable and
pdf[field.name].isnull().any()):
dtype[field.name] = pandas_type
for f, t in dtype.items():
pdf[f] = pdf[f].astype(t, copy=False)
if timezone is None:
return pdf
else:
from pyspark.sql.types import _check_series_convert_timestamps_local_tz
for field in self.schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
pdf[field.name] = \
_check_series_convert_timestamps_local_tz(pdf[field.name], timezone)
return pdf
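# Added note (illustration only, not part of the original source): the dtype
# correction above is skipped on purpose for nullable integer columns that actually
# contain nulls. A minimal sketch of the resulting behaviour, assuming an active
# `spark` session:
#
#     sdf = spark.createDataFrame([(1,), (None,)], "v: int")
#     sdf.toPandas()["v"].dtype   # float64, since NaN cannot be cast to np.int32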
def _collectAsArrow(self):
"""
Returns all records as a list of ArrowRecordBatches; pyarrow must be installed
and available on both the driver and worker Python environments.
.. note:: Experimental.
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectAsArrowToPython()
# Collect the list of un-ordered batches; the last element is a list of indices giving the correct order
results = list(_load_from_socket(sock_info, ArrowCollectSerializer()))
batches = results[:-1]
batch_order = results[-1]
# Re-order the batch list using the correct order
return [batches[i] for i in batch_order]
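# Added note (illustration only): if three partitions arrive over the socket as
# [batch_p1, batch_p2, batch_p0], the trailing index list sent by the JVM would be
# [2, 0, 1] (the arrival position of each partition), so the comprehension above
# yields [batch_p0, batch_p1, batch_p2], i.e. the batches in partition order.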
##########################################################################################
# Pandas compatibility
##########################################################################################
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
def _to_corrected_pandas_type(dt):
"""
When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong.
This method gets the corrected data type for Pandas if that type may be inferred incorrectly.
"""
import numpy as np
if type(dt) == ByteType:
return np.int8
elif type(dt) == ShortType:
return np.int16
elif type(dt) == IntegerType:
return np.int32
elif type(dt) == FloatType:
return np.float32
else:
return None
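# Added note (illustration only): without this correction, DataFrame.collect()
# returns plain Python numbers, so pandas would infer int64/float64 for every numeric
# column; the mapping above lets toPandas() downcast, e.g., a ShortType column to
# np.int16 and a FloatType column to np.float32, matching the Spark schema.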
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
from pyspark.sql.functions import from_unixtime
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
Row(name='Bob', age=5)]).toDF()
globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
Row(name='Bob', age=5, height=None),
Row(name='Tom', age=None, height=None),
Row(name=None, age=None, height=None)]).toDF()
globs['df5'] = sc.parallelize([Row(name='Alice', spy=False, age=10),
Row(name='Bob', spy=None, age=5),
Row(name='Mallory', spy=True, age=None)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
LiaoPan/scikit-learn
|
examples/plot_isotonic_regression.py
|
303
|
1767
|
"""
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
|
bsd-3-clause
|
henrykironde/scikit-learn
|
examples/linear_model/plot_logistic_path.py
|
349
|
1195
|
#!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
odejesush/tensorflow
|
tensorflow/contrib/learn/python/learn/tests/dataframe/arithmetic_transform_test.py
|
18
|
2568
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(test.TestCase):
"""Test class for `Sum` transform."""
def testSum(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a+b"] = frame["a"] + frame["b"]
expected_sum = pandas_df["a"] + pandas_df["b"]
actual_sum = frame.run_one_batch()["a+b"]
np.testing.assert_array_equal(expected_sum, actual_sum)
class DifferenceTestCase(test.TestCase):
"""Test class for `Difference` transform."""
def testDifference(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a-b"] = frame["a"] - frame["b"]
expected_diff = pandas_df["a"] - pandas_df["b"]
actual_diff = frame.run_one_batch()["a-b"]
np.testing.assert_array_equal(expected_diff, actual_diff)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
xmnlab/pywim
|
pywim/estimation/accuracy_calculation/cost323.py
|
2
|
8361
|
from scipy.stats import norm
from scipy.optimize import fsolve
from scipy import stats
import numpy as np
import pandas as pd
def calc_min_confidence(
data: pd.DataFrame, test_plan: str, env_condition: str
):
"""
=100*(2*NORMDIST(\\
IF(\$B\$5="r1";2,675/IF(\$D\$5="III";1,1;IF(\$D\$5="II";1,05;1));\\
IF(\$B\$5="r2";2,36/IF(\$D\$5="III";1,1;IF(\$D\$5="II";1,05;1));\\
IF(\$B\$5="RR1";2,155/IF(\$D\$5="III";1,1;IF(\$D\$5="II";1,05;1));\\
IF(\$B\$5="RR2";2/IF(\$D\$5="III";1,1;IF(\$D\$5="II";1,05;1));0))\\
))-TINV(0,05;B9-1)/SQRT(B9);0;1;TRUE())-1)
"""
# data = data.copy()
def _calc(v: float):
if env_condition == 'I':
_v = 1
elif env_condition == 'II':
_v = 1.05
elif env_condition == 'III':
_v = 1.1
else:
raise Exception('INVALID_ENV_CONDITION')
if test_plan == 'R1':
_v = 2.675 / _v
elif test_plan == 'R2':
_v = 2.36 / _v
elif test_plan == 'RR1':
_v = 2.155 / _v
elif test_plan == 'RR2':
_v = 2 / _v
else:
raise Exception('INVALID_TEST_PLAN')
# in isf, the probability is divided by 2
# because Excel's TINV is a two-sided tail
return 100 * (
2 * norm.cdf(_v - stats.t.isf(0.05 / 2, v - 1) / np.sqrt(v), 0,
1) - 1
)
test_plan = test_plan.upper()
env_condition = env_condition.upper()
data['min_confidence'] = data.number.apply(_calc)
return data
def calc_best_acceptable_class(
data: pd.DataFrame, initial_verification: bool
) -> pd.DataFrame:
"""
"""
# data = data.copy()
# IF(M$4=0;1;1,25)
factor = 1.25 if initial_verification else 1
best_acceptable_class = []
# gwv
# =I9∗IF(M$4=0;1;1,25)
best_acceptable_class.append(data.loc['gwv', 'min_tolerance'] * factor)
# group of axles
# =IF(I10<10; 0,7∗I10∗IF(M$4=0;1;1,25); I10∗IF(M$4=0;1;1,25)−3)
v = data.loc['group_axles', 'min_tolerance']
v = v * 0.7 * factor if v < 10 else v * factor - 3
best_acceptable_class.append(v)
# single axle
# =IF(I11<15;
# I11∗(I11∗IF(M$4=0;1;1,25)+97)∗IF(M$4=0;1;1,25)/168;
# I11∗IF(M$4=0;1;1,25)−5)
v = data.loc['single_axle', 'min_tolerance']
v = v * (v * factor + 97) * factor / 168 if v < 15 else v * factor - 5
best_acceptable_class.append(v)
# axle of a group
# =IF(I12<20;I12∗IF(M$4=0;1;1,25)/2;(I12∗IF(M$4=0;1;1,25)−10))
v = data.loc['axle_group', 'min_tolerance']
v = v * factor / 2 if v < 20 else v * factor - 10
best_acceptable_class.append(v)
data['best_acceptable_class'] = best_acceptable_class
return data
def calc_classification(data: pd.DataFrame) -> pd.DataFrame:
"""
=IF(OR(J9<=5;J9>7);ROUNDUP((J9/5);0)*5;7)
"""
def _calc(v: float):
# =IF(OR(J9<=5;J9>7);ROUNDUP((J9/5);0)*5;7)
return np.ceil(v / 5) * 5 if v <= 5 or v > 7 else 7
data['class_value'] = data.best_acceptable_class.apply(_calc)
return data
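# Added worked example (not part of the original module): a best acceptable class of
# 4.1 gives ceil(4.1 / 5) * 5 = 5, a value of 6.2 falls inside the (5, 7] band and is
# pinned to 7, and 8.3 gives ceil(8.3 / 5) * 5 = 10, matching the spreadsheet rule
# quoted in the docstring above.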
def resolve_class_name(data: pd.DataFrame) -> pd.DataFrame:
"""
=IF(K9<=5;CONCATENATE("A(";TEXT(K9;"0");")");\\
IF(K9<=7;CONCATENATE("B+(";TEXT(K9;"0");")");\\
IF(K9<=10;CONCATENATE("B(";TEXT(K9;"0");")");\\
IF(K9<=15;CONCATENATE("C(";TEXT(K9;"0");")");\\
IF(K9<=20;CONCATENATE("D+(";TEXT(K9;"0");")");\\
IF(K9<=25;CONCATENATE("D(";TEXT(K9;"0");")");\\
CONCATENATE("E(";TEXT(K9;"0");")")))))))
"""
def _resolve(v: int):
c = (
'A' if v <= 5 else
'B+' if v <= 7 else
'B' if v <= 10 else
'C' if v <= 15 else
'D+' if v <= 20 else
'D' if v <= 25 else
'E'
)
return '%s(%s)' % (c, int(v))
data['class_name'] = data.class_value.apply(_resolve)
return data
def calc_delta(data: pd.DataFrame, initial_verification: bool):
"""
"""
d = []
# factor
# IF(M$4=0;1;0,8)
factor = 0.8 if initial_verification else 1
# gwv
# =K9*IF(M$4=0;1;0,8)
d.append(data.loc['gwv', 'class_value'] * factor)
# group of axles
# =IF(K10<7;K10/0,7;IF(K10<30;K10+3;K10*1,1))*IF(M$4=0;1;0,8)
v = data.loc['group_axles', 'class_value']
v = v / 0.7 if v < 7 else v + 3 if v < 30 else v * 1.1
d.append(v * factor)
# single axle
# =IF(K11<10;K11*(85-K11)/50;IF(K11<25;K11+5;6*K11/5))*IF(M$4=0;1;0,8)
v = data.loc['single_axle', 'class_value']
v = v * (85 - v) / 50 if v < 10 else v + 5 if v < 25 else 6 * v / 5
d.append(v * factor)
# axle of group
# =IF(K12<10;2*K12;IF(K12<25;K12+10;6*K12/5+5))*IF(M$4=0;1;0,8)
v = data.loc['axle_group', 'class_value']
v = 2 * v if v < 10 else v + 10 if v < 25 else 6 * v / 5 + 5
d.append(v * factor)
data['d'] = d
return d
def calc_confidence_level(data: pd.DataFrame) -> pd.DataFrame:
"""
* Number (column B) [Input],
* Identified (column C) [Input],
* Mean (column D) [Input],
* Std deviat (column E) [Input],
* p_o (column F),
* Class (column G),
* d (column H),
* d_min (column I) [Input/Minimization Solver Output],
* d_c (column J),
* class (column K),
* p (column L) - related to column I,
* p (column M) - related to column H,
* Accepted (column O)
=100*(
1-TDIST((H9/E9-D9/E9)-TINV(0,05;B9-1)/SQRT(B9);B9-1;1)-
TDIST((H9/E9+D9/E9)-TINV(0,05;B9-1)/SQRT(B9);B9-1;1)
)
"""
def _calc(v: pd.Series) -> pd.Series:
return 100 * (
1 - stats.t.sf(
(v.d / v['std'] - v['mean'] / v['std']) -
stats.t.isf(0.05 / 2, v.number - 1) /
np.sqrt(v.number),
v.number - 1
) - stats.t.sf(
(v.d / v['std'] + v['mean'] / v['std']) -
stats.t.isf(0.05 / 2, v.number - 1) /
np.sqrt(v.number), v.number - 1
)
)
data['confidence_level'] = data.T.apply(_calc)
return data
def resolve_accepted_class(data: pd.DataFrame) -> str:
"""
O12 = MAX(K11:K12)
=IF(O12<=5;CONCATENATE("A(";TEXT(O12;"0");")");\\
IF(O12<=7;CONCATENATE("B+(";TEXT(O12;"0");")");\\
IF(O12<=10;CONCATENATE("B(";TEXT(O12;"0");")");\\
IF(O12<=15;CONCATENATE("C(";TEXT(O12;"0");")");\\
IF(O12<=20;CONCATENATE("D+(";TEXT(O12;"0");")");\\
IF(O12<=25;CONCATENATE("D(";TEXT(O12;"0");")");\\
CONCATENATE("E(";TEXT(O12;"0");")")))))))
"""
v = data['class_value'].max()
c = (
'A' if v <= 5 else
'B+' if v <= 7 else
'B' if v <= 10 else
'C' if v <= 15 else
'D+' if v <= 20 else
'D' if v <= 25 else
'E'
)
return '%s(%s)' % (c, int(v))
def solver_min_tolerance(data: pd.DataFrame) -> pd.DataFrame:
"""
* Number (column B) [Input],
* Identified (column C) [Input],
* Mean (column D) [Input],
* Std deviat (column E) [Input],
* p_o (column F),
* Class (column G),
* d (column H),
* d_min (column I) [Input/Minimization Solver Output],
* d_c (column J),
* class (column K),
* p (column L) - related to column I,
* p (column M) - related to column H,
* Accepted (column O)
=100*(
1-
TDIST((I9/E9-D9/E9)-TINV(0,05;B9-1)/SQRT(B9);B9-1;1)-
TDIST((I9/E9+D9/E9)-TINV(0,05;B9-1)/SQRT(B9);B9-1;1)
)
"""
for i in data.index:
s = data.loc[i, :]
_number = s['number']
_mean = s['mean']
_std = s['std']
_min_confidence = s['min_confidence']
_factor = stats.t.isf(0.05 / 2, _number - 1) / np.sqrt(_number)
_dof = _number - 1
def func(_min_tolerance):
return _min_confidence - 100 * (
1 -
stats.t.sf(
(_min_tolerance / _std - _mean / _std) - _factor, _dof) -
stats.t.sf(
(_min_tolerance / _std + _mean / _std) - _factor, _dof)
)
try:
data.loc[i, 'min_tolerance'] = fsolve(func, [1])[0]
except Exception:
data.loc[i, 'min_tolerance'] = np.nan
return data
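# Added usage sketch (inferred from the column dependencies above; not part of the
# original module). A plausible calling order for a COST323 accuracy assessment:
#
#     data = calc_min_confidence(data, test_plan='R2', env_condition='II')
#     data = solver_min_tolerance(data)        # produces 'min_tolerance'
#     data = calc_best_acceptable_class(data, initial_verification=False)
#     data = calc_classification(data)         # produces 'class_value'
#     data = resolve_class_name(data)
#     calc_delta(data, initial_verification=False)
#     data = calc_confidence_level(data)
#     accepted = resolve_accepted_class(data)
#
# where `data` is indexed by criterion ('gwv', 'group_axles', 'single_axle',
# 'axle_group') and carries 'number', 'mean' and 'std' columns.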
|
mit
|
ryfeus/lambda-packs
|
LightGBM_sklearn_scipy_numpy/source/sklearn/tests/test_discriminant_analysis.py
|
29
|
13926
|
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to check that
# fit does not fail)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigenvalues equals 1,
# Also tests whether the explained_variance_ratio_ formed by the
# eigen solver is the same as the explained_variance_ratio_ formed
# by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_eigen.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_svd.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_lda_store_covariance():
# Test for solvers 'lsqr' and 'eigen'
# 'store_covariance' has no effect on 'lsqr' and 'eigen' solvers
for solver in ('lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver).fit(X6, y6)
assert_true(hasattr(clf, 'covariance_'))
# Test the actual attribute:
clf = LinearDiscriminantAnalysis(solver=solver,
store_covariance=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariance_'))
assert_array_almost_equal(
clf.covariance_,
np.array([[0.422222, 0.088889], [0.088889, 0.533333]])
)
# Test for the SVD solver; the default is to not set the covariance_ attribute
clf = LinearDiscriminantAnalysis(solver='svd').fit(X6, y6)
assert_false(hasattr(clf, 'covariance_'))
# Test the actual attribute:
clf = LinearDiscriminantAnalysis(solver=solver,
store_covariance=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariance_'))
assert_array_almost_equal(
clf.covariance_,
np.array([[0.422222, 0.088889], [0.088889, 0.533333]])
)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assert that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariance():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_false(hasattr(clf, 'covariance_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariance=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariance_'))
assert_array_almost_equal(
clf.covariance_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariance_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_deprecation():
# Test the deprecation
clf = QuadraticDiscriminantAnalysis(store_covariances=True)
assert_warns_message(DeprecationWarning, "'store_covariances' was renamed"
" to store_covariance in version 0.19 and will be "
"removed in 0.21.", clf.fit, X, y)
# check that covariance_ (and covariances_ with warning) is stored
assert_warns_message(DeprecationWarning, "Attribute covariances_ was "
"deprecated in version 0.19 and will be removed "
"in 0.21. Use covariance_ instead", getattr, clf,
'covariances_')
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
|
mit
|
MengGuo/mix_initiative
|
utilities/mix_u/mix_u.py
|
1
|
2734
|
from math import exp
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
def rho(s):
if (s > 0):
return exp(-1.0/s)
else:
return 0
def smooth_mix(tele_control, navi_control, dist_to_trap):
ds = 0.4
epsilon = 0.1
mix_control = 0
gain = rho(dist_to_trap-ds)/(rho(dist_to_trap-ds)+rho(epsilon+ds-dist_to_trap))
mix_control = navi_control + gain*tele_control
#mix_control = (1-gain)*navi_control + gain*tele_control
return mix_control, gain
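# Added note (illustration of the blend above): rho is a smooth bump function, so the
# gain kappa = rho(d - ds) / (rho(d - ds) + rho(epsilon + ds - d)) rises smoothly from
# 0 to 1 over the band [ds, ds + epsilon]. For example, at dist_to_trap = ds = 0.4 the
# human input is fully suppressed (gain = 0, mix = navi_control), while at
# dist_to_trap = ds + epsilon = 0.5 it is fully added (gain = 1,
# mix = navi_control + tele_control).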
def draw_mix_u(d, navi_u, tele_u, mix_u, gain):
fig = plt.figure(figsize=(10,3))
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 2])
ax1 = plt.subplot(gs[0])
ds = 0.4
epsilon = 0.1
dd = [-ds+i*0.005 for i in range(200)]
gg = [rho(ddi)/(rho(ddi)+rho(epsilon-ddi)) for ddi in dd]
ax1.plot(dd, gg, color='k', linestyle='-',
linewidth=3, label=r'$\kappa(\cdot)$')
ax1.set_xlabel(r'$d_t-d_s(m)$',fontsize=20)
ax1.legend(loc = (.55,.62), labelspacing=0.7, numpoints=3, handlelength=2.5, ncol=1, prop={'size':15})
ax1.set_xlim(dd[0], dd[-1])
ax1.grid()
ax2 = plt.subplot(gs[1])
ax2.plot(d, navi_u, color='r', linestyle='-',
linewidth=3, marker='o', mfc='r',
fillstyle='full', markersize=6,label=r'$u_r$')
ax2.plot(d, tele_u,color='g', linestyle='-',
linewidth=3, marker='d', mfc='g',
fillstyle='full', markersize=6,label=r'$u_h$')
ax2.plot(d, mix_u, color='b', linestyle='-',
linewidth=3, marker='>', mfc='b',
fillstyle='full', markersize=6,label=r'$u$')
ax2.plot(d, gain, color='k', linestyle='-',
linewidth=3, marker='*', mfc='k',
fillstyle='full', markersize=6,label=r'$\kappa$')
ax2.set_xlabel(r'$d_t(m)$',fontsize=20)
ax2.legend(loc = (.6,.62), numpoints=3, handlelength=2.0, ncol=2, prop={'size':15})
ax2.set_xlim(0, d[-1]+0.01)
ax2.grid()
fig.tight_layout()
plt.savefig('mix_u.pdf',bbox_inches='tight')
return fig
if __name__ == "__main__":
#D = [k*0.05 for k in range(30)]
D = list(numpy.arange(0,0.4,0.05)) + list(numpy.arange(0.4,0.5,0.01)) + list(numpy.arange(0.5,1.1,0.05))
navi_U = [2 for k in range(len(D))]
tele_U = [1.5 for k in range(len(D))]
mix_U = []
Gain = []
for k in range(len(D)):
d = D[k]
navi_u = navi_U[k]
tele_u = tele_U[k]
mix_u, gain = smooth_mix(tele_u, navi_u, d)
mix_U.append(mix_u)
Gain.append(gain)
draw_mix_u(D, navi_U, tele_U, mix_U, Gain)
|
gpl-2.0
|
kdebrab/pandas
|
pandas/tests/arrays/categorical/test_analytics.py
|
4
|
12475
|
# -*- coding: utf-8 -*-
import pytest
import sys
import numpy as np
import pandas.util.testing as tm
from pandas import Categorical, Index, Series
from pandas.compat import PYPY
class TestCategoricalAnalytics(object):
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
pytest.raises(TypeError, lambda: cat.min())
pytest.raises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == "b"
_min = cat.min(numeric_only=True)
assert _min == "c"
_max = cat.max(numeric_only=True)
assert _max == "b"
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == 1
_min = cat.min(numeric_only=True)
assert _min == 2
_max = cat.max(numeric_only=True)
assert _max == 1
@pytest.mark.parametrize("values,categories,exp_mode", [
([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4])])
def test_mode(self, values, categories, exp_mode):
s = Categorical(values, categories=categories, ordered=True)
res = s.mode()
exp = Categorical(exp_mode, categories=categories, ordered=True)
tm.assert_categorical_equal(res, exp)
def test_searchsorted(self):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
c1 = Categorical(['cheese', 'milk', 'apple', 'bread', 'bread'],
categories=['cheese', 'milk', 'apple', 'bread'],
ordered=True)
s1 = Series(c1)
c2 = Categorical(['cheese', 'milk', 'apple', 'bread', 'bread'],
categories=['cheese', 'milk', 'apple', 'bread'],
ordered=False)
s2 = Series(c2)
# Searching for single item argument, side='left' (default)
res_cat = c1.searchsorted('apple')
res_ser = s1.searchsorted('apple')
exp = np.array([2], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for single item array, side='left' (default)
res_cat = c1.searchsorted(['bread'])
res_ser = s1.searchsorted(['bread'])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for several items array, side='right'
res_cat = c1.searchsorted(['apple', 'bread'], side='right')
res_ser = s1.searchsorted(['apple', 'bread'], side='right')
exp = np.array([3, 5], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
pytest.raises(ValueError, lambda: c1.searchsorted('cucumber'))
pytest.raises(ValueError, lambda: s1.searchsorted('cucumber'))
# Searching for multiple values, one of which is not from the Categorical
pytest.raises(ValueError,
lambda: c1.searchsorted(['bread', 'cucumber']))
pytest.raises(ValueError,
lambda: s1.searchsorted(['bread', 'cucumber']))
# searchsorted call for unordered Categorical
pytest.raises(ValueError, lambda: c2.searchsorted('apple'))
pytest.raises(ValueError, lambda: s2.searchsorted('apple'))
with tm.assert_produces_warning(FutureWarning):
res = c1.searchsorted(v=['bread'])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res, exp)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = Index(["a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = Index(["c", "a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(exp, categories=['c', 'a', 'b'])
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = Index(["b", "a"])
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
tm.assert_categorical_equal(res, exp_cat)
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['c', 'b', 'a'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', np.nan, 'a'], categories=['a', 'b'],
ordered=True)
tm.assert_categorical_equal(res, exp_cat)
def test_unique_index_series(self):
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
# Categorical.unique sorts categories by appearance order
# if ordered=False
exp = Categorical([3, 1, 2], categories=[3, 1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
exp = Categorical([1, 2], categories=[1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1], ordered=True)
# Categorical.unique keeps categories order if ordered=True
exp = Categorical([3, 1, 2], categories=[3, 2, 1], ordered=True)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
def test_shift(self):
# GH 9416
cat = Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = Categorical([np.nan, 'a', 'b', 'c', 'd'])
tm.assert_categorical_equal(sp1, xp1)
tm.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
tm.assert_categorical_equal(sn2, xp2)
tm.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
tm.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = Categorical([1, 2, 3])
exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories
assert cat.nbytes == exp
def test_memory_usage(self):
cat = Categorical([1, 2, 3])
# .categories is an index, so we include the hashtable
assert 0 < cat.nbytes <= cat.memory_usage()
assert 0 < cat.nbytes <= cat.memory_usage(deep=True)
cat = Categorical(['foo', 'foo', 'bar'])
assert cat.memory_usage(deep=True) > cat.nbytes
if not PYPY:
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
assert abs(diff) < 100
def test_map(self):
c = Categorical(list('ABABC'), categories=list('CBA'), ordered=True)
result = c.map(lambda x: x.lower())
exp = Categorical(list('ababc'), categories=list('cba'), ordered=True)
tm.assert_categorical_equal(result, exp)
c = Categorical(list('ABABC'), categories=list('ABC'), ordered=False)
result = c.map(lambda x: x.lower())
exp = Categorical(list('ababc'), categories=list('abc'), ordered=False)
tm.assert_categorical_equal(result, exp)
result = c.map(lambda x: 1)
# GH 12766: Return an index not an array
tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64)))
def test_validate_inplace(self):
cat = Categorical(['A', 'B', 'B', 'C', 'A'])
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
cat.set_ordered(value=True, inplace=value)
with pytest.raises(ValueError):
cat.as_ordered(inplace=value)
with pytest.raises(ValueError):
cat.as_unordered(inplace=value)
with pytest.raises(ValueError):
cat.set_categories(['X', 'Y', 'Z'], rename=True, inplace=value)
with pytest.raises(ValueError):
cat.rename_categories(['X', 'Y', 'Z'], inplace=value)
with pytest.raises(ValueError):
cat.reorder_categories(
['X', 'Y', 'Z'], ordered=True, inplace=value)
with pytest.raises(ValueError):
cat.add_categories(
new_categories=['D', 'E', 'F'], inplace=value)
with pytest.raises(ValueError):
cat.remove_categories(removals=['D', 'E', 'F'], inplace=value)
with pytest.raises(ValueError):
cat.remove_unused_categories(inplace=value)
with pytest.raises(ValueError):
cat.sort_values(inplace=value)
def test_repeat(self):
# GH10183
cat = Categorical(["a", "b"], categories=["a", "b"])
exp = Categorical(["a", "a", "b", "b"], categories=["a", "b"])
res = cat.repeat(2)
tm.assert_categorical_equal(res, exp)
def test_numpy_repeat(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
exp = Categorical(["a", "a", "b", "b"], categories=["a", "b"])
tm.assert_categorical_equal(np.repeat(cat, 2), exp)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.repeat, cat, 2, axis=1)
def test_isna(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isna()
tm.assert_numpy_array_equal(res, exp)
|
bsd-3-clause
|
DimiterM/santander
|
make_dataframe.py
|
1
|
11741
|
"""
make_dataframe.py
Transforms the original Kaggle dataset (csv) to input dataset (csv) for the model
Params:
IMPUTE - True if the script should impute missing values
ISTEST - True for the test set, False for the train set
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--noimpute', dest='no_impute', default=False, action='store_true', help="True if the script should NOT impute missing values")
parser.add_argument('--test', dest='is_test', default=False, action='store_true', help="True for the test set, False for the train set")
args = parser.parse_args()
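# Example invocations (added note; file names follow the read/write paths below):
#   python make_dataframe.py             -> reads train_ver2.csv, writes df.csv and catdf.csv (with imputation)
#   python make_dataframe.py --test      -> reads test_ver2.csv, writes testdf.csv and testcatdf.csv
#   python make_dataframe.py --noimpute  -> skips the missing-value imputation step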
import pandas as pd
import numpy as np
IMPUTE = not args.no_impute
ISTEST = args.is_test
df = pd.DataFrame()
tr = pd.read_csv("./"+("test" if ISTEST else "train")+"_ver2.csv",
dtype={"age":str, "antiguedad":str, "indrel_1mes":str, "conyuemp":str})
df["t"] = 12 * (pd.DatetimeIndex(pd.to_datetime(tr["fecha_dato"],format="%Y-%m-%d")).year - 2015) + pd.DatetimeIndex(pd.to_datetime(tr["fecha_dato"],format="%Y-%m-%d")).month
df["t_month"] = pd.DatetimeIndex(pd.to_datetime(tr["fecha_dato"],format="%Y-%m-%d")).month
tr.drop(["fecha_dato"], axis=1, inplace=True)
df["id"] = tr["ncodpers"]
tr.drop(["ncodpers"], axis=1, inplace=True)
# df["employee"] = tr["ind_empleado"]
# df.loc[df["employee"] == "S", "employee"] = "N"
# tr.drop(["ind_empleado"], axis=1, inplace=True)
# df["is_spouse"] = tr["conyuemp"].map({'S': 1, 'N': 0})
# df.loc[df["is_spouse"] == 1, "employee"] = "M"
# tr.drop(["conyuemp"], axis=1, inplace=True)
# df.drop(["is_spouse"], axis=1, inplace=True)
df["employee"] = tr["ind_empleado"]
df.loc[(df["employee"] == "S") | (df["employee"].isnull()), "employee"] = "N"
tr.drop(["ind_empleado", "conyuemp"], axis=1, inplace=True)
df["country"] = tr["pais_residencia"]
tr.drop(["pais_residencia"], axis=1, inplace=True)
df["sex"] = tr["sexo"].map({'H': 1, 'V': 0})
tr.drop(["sexo"], axis=1, inplace=True)
df["age"] = pd.to_numeric(tr["age"], downcast="integer", errors="coerce") ## floats ?!
tr.drop(["age"], axis=1, inplace=True)
df["seniority_new"] = pd.to_numeric(tr["ind_nuevo"], downcast="integer", errors="coerce") ## floats ?!
tr.drop(["ind_nuevo"], axis=1, inplace=True)
df["seniority"] = pd.to_numeric(tr["antiguedad"], downcast="integer", errors="coerce") ## floats ?!
tr.drop(["antiguedad", "fecha_alta"], axis=1, inplace=True)
df["is_primary"] = tr["indrel"]
df.loc[df["is_primary"] == 99, "is_primary"] = 0
# df["last_day_primary"] = pd.DatetimeIndex(pd.to_datetime(tr["ult_fec_cli_1t"],format="%Y-%m-%d")).day
tr.drop(["indrel", "ult_fec_cli_1t"], axis=1, inplace=True)
df["customer_type"] = tr["indrel_1mes"].str.replace('.0', '').replace('P', '0')
tr.drop(["indrel_1mes"], axis=1, inplace=True)
df["customer_rel"] = tr["tiprel_1mes"]
df.loc[df["customer_rel"] == "N", "customer_rel"] = "A"
tr.drop(["tiprel_1mes"], axis=1, inplace=True)
df["is_domestic"] = tr["indresi"].map({'S': 1, 'N': 0})
tr.drop(["indresi"], axis=1, inplace=True)
df["is_foreigner"] = tr["indext"].map({'S': 1, 'N': 0})
tr.drop(["indext"], axis=1, inplace=True)
df["is_dead"] = tr["indfall"].map({'S': 1, 'N': 0})
tr.drop(["indfall"], axis=1, inplace=True)
# df["channel"] = tr["canal_entrada"]
# tr.drop(["canal_entrada"], axis=1, inplace=True)
tr["canal_entrada_0"] = tr["canal_entrada"].str.slice(0,1)
tr["canal_entrada_1"] = tr["canal_entrada"].str.slice(1,2)
tr["canal_entrada_2"] = tr["canal_entrada"].str.slice(2,3)
tr["channel_0"] = 0
tr["channel_1"] = 0
tr["channel_2"] = 0
tr.loc[tr["canal_entrada"] == "RED", "channel_0"] = 2
tr.loc[~(tr["canal_entrada"].isnull()) & (tr["canal_entrada"].str.startswith("0")), "channel_0"] = 1
tr.loc[~(tr["canal_entrada"].isnull()) & (tr["canal_entrada"].str.startswith("0")), "channel_1"] = tr["canal_entrada"].map({'004': 0, '007': 1, '013': 2, '025': 3})
tr.loc[(tr["canal_entrada_0"] == "K") & (tr["canal_entrada_1"] != "0"), "channel_1"] = tr["canal_entrada_1"].map(
{c: int(c, 36) - 9 for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"})
tr.loc[(tr["canal_entrada_0"] == "K") & (tr["canal_entrada_2"] != "0"), "channel_2"] = tr["canal_entrada_2"].map(
{c: int(c, 36) - 9 for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"})
tr.loc[tr["canal_entrada"].isnull(), "channel_0"] = np.nan
tr.loc[tr["canal_entrada"].isnull(), "channel_1"] = np.nan
tr.loc[tr["canal_entrada"].isnull(), "channel_2"] = np.nan
df["channel_0"] = tr["channel_0"]
df["channel_1"] = tr["channel_1"]
df["channel_2"] = tr["channel_2"]
tr.drop(["canal_entrada", "canal_entrada_0", "canal_entrada_1", "canal_entrada_2", "channel_0", "channel_1", "channel_2"], axis=1, inplace=True)
df["province"] = pd.to_numeric(tr["cod_prov"], downcast="integer", errors="coerce") ## floats ?!
tr.drop(["cod_prov", "nomprov", "tipodom"], axis=1, inplace=True)
df["is_active"] = pd.to_numeric(tr["ind_actividad_cliente"], downcast="integer", errors="coerce") ## floats ?!
tr.drop(["ind_actividad_cliente"], axis=1, inplace=True)
df["income"] = tr["renta"]
tr.drop(["renta"], axis=1, inplace=True)
if ISTEST:
df["income"] = pd.to_numeric(df["income"].str.replace('NA', '').replace(' ', ''), downcast="float", errors="coerce") ## for testset
df["segment"] = pd.to_numeric(tr["segmento"].str.slice(1,2), downcast="integer", errors="coerce") ## floats ?!
tr.drop(["segmento"], axis=1, inplace=True)
if not ISTEST:
tr.columns = [
"y_1_saving", "y_2_guarantees", "y_3_current", "y_4_derivate", "y_5_payroll", "y_6_junior",
"y_7_particular_M", "y_8_particular", "y_9_particular_P", "y_10_deposit_S", "y_11_deposit_M",
"y_12_deposit_L", "y_13_eacc", "y_14_funds", "y_15_mortgage", "y_16_pensions", "y_17_loans",
"y_18_taxes", "y_19_creditcard", "y_20_securities", "y_21_homeacc",
"y_22_payroll_2", "y_23_pensions_2", "y_24_direct"]
tr.fillna(value=0, inplace=True)
df = pd.concat([df, tr], axis=1)
if IMPUTE:
df.isnull().any()
for c in [
'employee', 'country', 'sex', 'seniority_new', 'is_primary', 'customer_type', 'customer_rel',
'is_domestic', 'is_foreigner', 'is_dead', 'channel_0', 'channel_1', 'channel_2',
'province', 'is_active', 'segment']:
df[c].fillna(value=df[c].mode().iloc[0], inplace=True)
for c in ['age', 'seniority']:
df[c].fillna(value=df[c].mean(), inplace=True)
df['income'].fillna(value=df['income'].mean(), inplace=True)
df.isnull().any()
df.to_csv(path_or_buf="./"+("test" if ISTEST else "")+"df.csv", index=False)
#df = pd.read_csv("./"+("test" if ISTEST else "")+"df.csv")
catdf = pd.DataFrame()
catdf["id"] = df["id"]
catdf["employee"] = df["employee"]
catdf["employee_bit_notN"] = 0
catdf.loc[catdf["employee"] != 'N', "employee_bit_notN"] = 1
catdf.drop(["employee"], axis=1, inplace=True)
catdf["country"] = df["country"]
catdf["country_num"] = 0.0
catdf.loc[catdf["country"] == 'ES', "country_num"] = 1.0
catdf.loc[catdf["country"].isin(['AT', 'BE', 'BG', 'CY', 'CZ', 'DK', 'EE', 'FI', 'FR', 'DE', 'GR', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT', 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'SE', 'GB', 'HR']), "country_num"] = 0.5
catdf.drop(["country"], axis=1, inplace=True)
catdf["customer_type"] = df["customer_type"]
catdf["customer_type_bit_not1"] = 0
catdf.loc[catdf["customer_type"] != 1, "customer_type_bit_not1"] = 1
catdf.drop(["customer_type"], axis=1, inplace=True)
catdf["customer_rel"] = df["customer_rel"]
catdf["customer_rel_I"] = 0
catdf["customer_rel_A"] = 0
catdf.loc[(catdf["customer_rel"] == 'I') | (catdf["customer_rel"] == 'P'), "customer_rel_I"] = 1
catdf.loc[(catdf["customer_rel"] == 'A') | (catdf["customer_rel"] == 'R'), "customer_rel_A"] = 1
catdf.drop(["customer_rel"], axis=1, inplace=True)
dummies = pd.get_dummies(df["channel_0"], prefix="channel_0")
dummies.columns = [cn.replace(".0", "") for cn in dummies.columns.tolist()]
catdf = pd.concat([catdf, dummies], axis=1)
dummies = pd.get_dummies(df["channel_1"], prefix="channel_1")
dummies.columns = [cn.replace(".0", "") for cn in dummies.columns.tolist()]
catdf = pd.concat([catdf, dummies], axis=1)
# dummies = pd.get_dummies(df["channel_2"], prefix="channel_2")
# dummies.columns = [cn.replace(".0", "") for cn in dummies.columns.tolist()]
# catdf = pd.concat([catdf, dummies], axis=1)
#
# dummies = pd.get_dummies(df["province"], prefix="province")
# dummies.columns = [cn.replace(".0", "") for cn in dummies.columns.tolist()]
# catdf = pd.concat([catdf, dummies], axis=1)
catdf["province_code"] = df["province"].map({
51: 'AFR', 52: 'AFR',
4: 'AND', 11: 'AND', 14: 'AND', 18: 'AND', 21: 'AND', 23: 'AND', 29: 'AND', 41: 'AND',
22: 'ARA', 44: 'ARA', 50: 'ARA', 33: 'AST', 7: 'BAL', 1: 'BAS', 48: 'BAS', 20: 'BAS',
35: 'CAN', 38: 'CAN', 5: 'CAS', 9: 'CAS', 24: 'CAS', 34: 'CAS', 37: 'CAS', 40: 'CAS', 42: 'CAS', 47: 'CAS', 49: 'CAS',
8: 'CAT', 17: 'CAT', 25: 'CAT', 43: 'CAT', 39: 'CNB', 6: 'EXT', 10: 'EXT',
15: 'GAL', 27: 'GAL', 32: 'GAL', 36: 'GAL', 28: 'MAD', 2: 'MAN', 13: 'MAN', 16: 'MAN', 19: 'MAN', 45: 'MAN',
30: 'MUR', 31: 'NAV', 26: 'RIO', 3: 'VAL', 12: 'VAL', 46: 'VAL'})
dummies = pd.get_dummies(catdf["province_code"], prefix="province")
catdf = pd.concat([catdf, dummies], axis=1)
catdf.drop(["province_code"], axis=1, inplace=True)
catdf["province_pop"] = df["province"].map({
52.0 : 0.0128, 51.0 : 0.0129, 42.0 : 0.0143, 44.0 : 0.0218, 40.0 : 0.0248, 5.0 : 0.0259,
34.0 : 0.0260, 49.0 : 0.0289, 16.0 : 0.0326, 22.0 : 0.0348, 19.0 : 0.0396, 1.0 : 0.0494,
26.0 : 0.0495, 32.0 : 0.0502, 37.0 : 0.0531, 27.0 : 0.0532, 9.0 : 0.0571, 2.0 : 0.0615, 10.0 : 0.0631,
25.0 : 0.0678, 24.0 : 0.0753, 21.0 : 0.0801, 13.0 : 0.0808, 47.0 : 0.0819, 39.0 : 0.0911,
12.0 : 0.0926, 31.0 : 0.0992, 23.0 : 0.1023, 6.0 : 0.1068, 4.0 : 0.1076, 45.0 : 0.1087,
20.0 : 0.1098, 17.0 : 0.1172, 14.0 : 0.1235, 43.0 : 0.1247, 18.0 : 0.1415, 36.0 : 0.1470,
50.0 : 0.1506, 38.0 : 0.1562, 33.0 : 0.1644, 35.0 : 0.1699, 7.0 : 0.1711, 15.0 : 0.1752,
48.0 : 0.1780, 11.0 : 0.1906, 30.0 : 0.2266, 29.0 : 0.2544, 41.0 : 0.2989, 3.0 : 0.2995, 46.0 : 0.3951, 8.0 : 0.8530, 28.0 : 1.0000})
dummies = pd.get_dummies(df["segment"], prefix="segment")
dummies.columns = [cn.replace(".0", "") for cn in dummies.columns.tolist()]
catdf = pd.concat([catdf, dummies], axis=1)
df = df[["t", "t_month", "id", "sex", "age", "seniority_new", "seniority", "is_primary", "is_domestic", "is_foreigner", "is_dead", "is_active", "income"] + (["y_1_saving", "y_2_guarantees", "y_3_current", "y_4_derivate", "y_5_payroll", "y_6_junior", "y_7_particular_M", "y_8_particular", "y_9_particular_P", "y_10_deposit_S", "y_11_deposit_M", "y_12_deposit_L", "y_13_eacc", "y_14_funds", "y_15_mortgage", "y_16_pensions", "y_17_loans", "y_18_taxes", "y_19_creditcard", "y_20_securities", "y_21_homeacc", "y_22_payroll_2", "y_23_pensions_2", "y_24_direct"] if not ISTEST else [])]
catdf_cols = catdf.columns.tolist()[1:]
catdf = catdf[catdf_cols]
catdf = pd.concat([catdf, df], axis=1) # catdf = pd.merge(catdf, df, how="outer", on=["id"])
catdf = catdf[["t", "t_month", "id", "sex", "age", "seniority_new", "seniority", "is_primary", "is_domestic", "is_foreigner", "is_dead", "is_active", "income"] + catdf_cols + (["y_1_saving", "y_2_guarantees", "y_3_current", "y_4_derivate", "y_5_payroll", "y_6_junior", "y_7_particular_M", "y_8_particular", "y_9_particular_P", "y_10_deposit_S", "y_11_deposit_M", "y_12_deposit_L", "y_13_eacc", "y_14_funds", "y_15_mortgage", "y_16_pensions", "y_17_loans", "y_18_taxes", "y_19_creditcard", "y_20_securities", "y_21_homeacc", "y_22_payroll_2", "y_23_pensions_2", "y_24_direct"] if not ISTEST else [])]
catdf.to_csv(path_or_buf="./"+("test" if ISTEST else "")+"catdf.csv", index=False)
|
mit
|
gfyoung/pandas
|
pandas/tests/frame/methods/test_interpolate.py
|
1
|
11900
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestDataFrameInterpolate:
def test_interp_basic(self):
df = DataFrame(
{
"A": [1, 2, np.nan, 4],
"B": [1, 4, 9, np.nan],
"C": [1, 2, 3, 5],
"D": list("abcd"),
}
)
expected = DataFrame(
{
"A": [1.0, 2.0, 3.0, 4.0],
"B": [1.0, 4.0, 9.0, 9.0],
"C": [1, 2, 3, 5],
"D": list("abcd"),
}
)
result = df.interpolate()
tm.assert_frame_equal(result, expected)
result = df.set_index("C").interpolate()
expected = df.set_index("C")
expected.loc[3, "A"] = 3
expected.loc[5, "B"] = 9
tm.assert_frame_equal(result, expected)
def test_interp_empty(self):
# https://github.com/pandas-dev/pandas/issues/35598
df = DataFrame()
result = df.interpolate()
assert result is not df
expected = df
tm.assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame(
{
"A": [1, 2, np.nan, 4],
"B": [1, 4, 9, np.nan],
"C": [1, 2, 3, 5],
"D": list("abcd"),
}
)
msg = (
r"method must be one of \['linear', 'time', 'index', 'values', "
r"'nearest', 'zero', 'slinear', 'quadratic', 'cubic', "
r"'barycentric', 'krogh', 'spline', 'polynomial', "
r"'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima', "
r"'cubicspline'\]. Got 'not_a_method' instead."
)
with pytest.raises(ValueError, match=msg):
df.interpolate(method="not_a_method")
def test_interp_combo(self):
df = DataFrame(
{
"A": [1.0, 2.0, np.nan, 4.0],
"B": [1, 4, 9, np.nan],
"C": [1, 2, 3, 5],
"D": list("abcd"),
}
)
result = df["A"].interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], name="A")
tm.assert_series_equal(result, expected)
result = df["A"].interpolate(downcast="infer")
expected = Series([1, 2, 3, 4], name="A")
tm.assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({"A": [1, 2, np.nan, 4], "B": [np.nan, 2, 3, 4]})
df = df.set_index("A")
msg = (
"Interpolation with NaNs in the index has not been implemented. "
"Try filling those NaNs before interpolating."
)
with pytest.raises(NotImplementedError, match=msg):
df.interpolate(method="values")
@td.skip_if_no_scipy
def test_interp_various(self):
df = DataFrame(
{"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
)
df = df.set_index("C")
expected = df.copy()
result = df.interpolate(method="polynomial", order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
tm.assert_frame_equal(result, expected)
result = df.interpolate(method="cubic")
# GH #15662.
expected.A.loc[3] = 2.81547781
expected.A.loc[13] = 5.52964175
tm.assert_frame_equal(result, expected)
result = df.interpolate(method="nearest")
expected.A.loc[3] = 2
expected.A.loc[13] = 5
tm.assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method="quadratic")
expected.A.loc[3] = 2.82150771
expected.A.loc[13] = 6.12648668
tm.assert_frame_equal(result, expected)
result = df.interpolate(method="slinear")
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
tm.assert_frame_equal(result, expected)
result = df.interpolate(method="zero")
expected.A.loc[3] = 2.0
expected.A.loc[13] = 5
tm.assert_frame_equal(result, expected, check_dtype=False)
@td.skip_if_no_scipy
def test_interp_alt_scipy(self):
df = DataFrame(
{"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
)
result = df.interpolate(method="barycentric")
expected = df.copy()
expected.loc[2, "A"] = 3
expected.loc[5, "A"] = 6
tm.assert_frame_equal(result, expected)
result = df.interpolate(method="barycentric", downcast="infer")
tm.assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method="krogh")
expectedk = df.copy()
expectedk["A"] = expected["A"]
tm.assert_frame_equal(result, expectedk)
result = df.interpolate(method="pchip")
expected.loc[2, "A"] = 3
expected.loc[5, "A"] = 6.0
tm.assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame(
{
0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4],
}
)
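        # axis=1 interpolates across the columns within each row.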
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
tm.assert_frame_equal(result, expected)
result = df.interpolate(axis=1, method="values")
tm.assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"axis_name, axis_number",
[
pytest.param("rows", 0, id="rows_0"),
pytest.param("index", 0, id="index_0"),
pytest.param("columns", 1, id="columns_1"),
],
)
def test_interp_axis_names(self, axis_name, axis_number):
# GH 29132: test axis names
data = {0: [0, np.nan, 6], 1: [1, np.nan, 7], 2: [2, 5, 8]}
df = DataFrame(data, dtype=np.float64)
result = df.interpolate(axis=axis_name, method="linear")
expected = df.interpolate(axis=axis_number, method="linear")
tm.assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame(
{
0: [0, 0.5, 1.0, np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1],
}
)
df.interpolate(axis=0)
# TODO: assert something?
@pytest.mark.parametrize(
"check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)]
)
def test_interp_leading_nans(self, check_scipy):
df = DataFrame(
{"A": [np.nan, np.nan, 0.5, 0.25, 0], "B": [np.nan, -3, -3.5, np.nan, -4]}
)
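        # Leading NaNs cannot be interpolated and stay NaN; only the interior
        # NaN in "B" is filled.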
result = df.interpolate()
expected = df.copy()
expected["B"].loc[3] = -3.75
tm.assert_frame_equal(result, expected)
if check_scipy:
result = df.interpolate(method="polynomial", order=1)
tm.assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self, axis):
df = DataFrame(
{
"A": [1, 2, np.nan, 4],
"B": ["a", "b", "c", "d"],
"C": [np.nan, 2, 5, 7],
"D": [np.nan, np.nan, 9, 9],
"E": [1, 2, 3, 4],
}
)
msg = (
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
with pytest.raises(TypeError, match=msg):
df.astype("object").interpolate(axis=axis)
def test_interp_raise_on_all_object_dtype(self):
# GH 22985
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, dtype="object")
msg = (
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
with pytest.raises(TypeError, match=msg):
df.interpolate()
def test_interp_inplace(self):
df = DataFrame({"a": [1.0, 2.0, np.nan, 4.0]})
expected = DataFrame({"a": [1.0, 2.0, 3.0, 4.0]})
result = df.copy()
return_value = result["a"].interpolate(inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result["a"].interpolate(inplace=True, downcast="infer")
assert return_value is None
tm.assert_frame_equal(result, expected.astype("int64"))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame(
{"a": [1.0, 2.0, 3.0, 4.0], "b": [np.nan, 2.0, 3.0, 4.0], "c": [3, 2, 2, 2]}
)
expected = result.interpolate(method="linear", axis=1, inplace=False)
return_value = result.interpolate(method="linear", axis=1, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame(
{
"A": [1, 2, np.nan, 4],
"B": [1, 2, 3, 4],
"C": [1.0, 2.0, np.nan, 4.0],
"D": [1.0, 2.0, 3.0, 4.0],
}
)
expected = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="float64"),
"B": np.array([1, 2, 3, 4], dtype="int64"),
"C": np.array([1.0, 2.0, 3, 4.0], dtype="float64"),
"D": np.array([1.0, 2.0, 3.0, 4.0], dtype="float64"),
}
)
result = df.interpolate(downcast=None)
tm.assert_frame_equal(result, expected)
# all good
result = df[["B", "D"]].interpolate(downcast=None)
tm.assert_frame_equal(result, df[["B", "D"]])
def test_interp_time_inplace_axis(self, axis):
# GH 9687
periods = 5
idx = date_range(start="2014-01-01", periods=periods)
data = np.random.rand(periods, periods)
data[data < 0.5] = np.nan
expected = DataFrame(index=idx, columns=idx, data=data)
result = expected.interpolate(axis=0, method="time")
return_value = expected.interpolate(axis=0, method="time", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("axis_name, axis_number", [("index", 0), ("columns", 1)])
def test_interp_string_axis(self, axis_name, axis_number):
# https://github.com/pandas-dev/pandas/issues/25190
x = np.linspace(0, 100, 1000)
y = np.sin(x)
df = DataFrame(
data=np.tile(y, (10, 1)), index=np.arange(10), columns=x
).reindex(columns=x * 1.005)
result = df.interpolate(method="linear", axis=axis_name)
expected = df.interpolate(method="linear", axis=axis_number)
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) support axis=1
@pytest.mark.parametrize("method", ["ffill", "bfill", "pad"])
def test_interp_fillna_methods(self, axis, method):
# GH 12918
df = DataFrame(
{
"A": [1.0, 2.0, 3.0, 4.0, np.nan, 5.0],
"B": [2.0, 4.0, 6.0, np.nan, 8.0, 10.0],
"C": [3.0, 6.0, 9.0, np.nan, np.nan, 30.0],
}
)
expected = df.fillna(axis=axis, method=method)
result = df.interpolate(method=method, axis=axis)
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
LiaoPan/scikit-learn
|
sklearn/linear_model/tests/test_ransac.py
|
216
|
13290
|
import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises_regexp, assert_less
from scipy import sparse
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
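    # The first two samples are outliers; every remaining point lies exactly on y == 0.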
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0)
    assert_raises_regexp(ValueError,
                         r"No inliers.*residual_threshold.*0\.0",
                         ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
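    # With base_estimator=None, RANSACRegressor falls back to a default
    # LinearRegression, so both estimators should give the same predictions.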
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
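    # The tabulated counts follow N = ceil(log(1 - probability) /
    # log(1 - w**min_samples)), where w is the inlier ratio n_inliers / n_samples.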
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
|
bsd-3-clause
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/matplotlib/sphinxext/mathmpl.py
|
12
|
3822
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import sys
from hashlib import md5
from docutils import nodes
from docutils.parsers.rst import directives
import warnings
from matplotlib import rcParams
from matplotlib.mathtext import MathTextParser
rcParams['mathtext.fontset'] = 'cm'
mathtext_parser = MathTextParser("Bitmap")
# Define LaTeX math node:
class latex_math(nodes.General, nodes.Element):
pass
def fontset_choice(arg):
return directives.choice(arg, ['cm', 'stix', 'stixsans'])
options_spec = {'fontset': fontset_choice}
def math_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
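    # The raw text looks like :math:`...`; keep everything between the backticks.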
i = rawtext.find('`')
latex = rawtext[i+1:-1]
node = latex_math(rawtext)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node], []
math_role.options = options_spec
def math_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
latex = ''.join(content)
node = latex_math(block_text)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node]
# This uses mathtext to render the expression
def latex2png(latex, filename, fontset='cm'):
latex = "$%s$" % latex
orig_fontset = rcParams['mathtext.fontset']
rcParams['mathtext.fontset'] = fontset
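    # Reuse a previously rendered PNG and only recompute its baseline depth;
    # otherwise render the expression (falling back to depth 0 on failure).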
if os.path.exists(filename):
depth = mathtext_parser.get_depth(latex, dpi=100)
else:
try:
depth = mathtext_parser.to_png(filename, latex, dpi=100)
except:
warnings.warn("Could not render math expression %s" % latex,
Warning)
depth = 0
rcParams['mathtext.fontset'] = orig_fontset
sys.stdout.write("#")
sys.stdout.flush()
return depth
# LaTeX to HTML translation stuff:
def latex2html(node, source):
inline = isinstance(node.parent, nodes.TextElement)
latex = node['latex']
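    # Name the image after a hash of the LaTeX source so identical expressions
    # share a single cached file.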
name = 'math-%s' % md5(latex.encode()).hexdigest()[-10:]
destdir = os.path.join(setup.app.builder.outdir, '_images', 'mathmpl')
if not os.path.exists(destdir):
os.makedirs(destdir)
dest = os.path.join(destdir, '%s.png' % name)
path = '/'.join((setup.app.builder.imgpath, 'mathmpl'))
depth = latex2png(latex, dest, node['fontset'])
if inline:
cls = ''
else:
cls = 'class="center" '
if inline and depth != 0:
style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
else:
style = ''
return '<img src="%s/%s.png" %s%s/>' % (path, name, cls, style)
def setup(app):
setup.app = app
# Add visit/depart methods to HTML-Translator:
def visit_latex_math_html(self, node):
source = self.document.attributes['source']
self.body.append(latex2html(node, source))
def depart_latex_math_html(self, node):
pass
# Add visit/depart methods to LaTeX-Translator:
def visit_latex_math_latex(self, node):
inline = isinstance(node.parent, nodes.TextElement)
if inline:
self.body.append('$%s$' % node['latex'])
else:
self.body.extend(['\\begin{equation}',
node['latex'],
'\\end{equation}'])
def depart_latex_math_latex(self, node):
pass
app.add_node(latex_math,
html=(visit_latex_math_html, depart_latex_math_html),
latex=(visit_latex_math_latex, depart_latex_math_latex))
app.add_role('math', math_role)
app.add_directive('math', math_directive,
True, (0, 0, 0), **options_spec)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
|
gpl-3.0
|