repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
phdowling/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
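# Added note (not part of the original benchmark): if kernprof/line_profiler is
# not available, a rough wall-clock comparison can be sketched with the standard
# library, e.g.
#
#   import time
#   t0 = time.time(); benchmark_dense_predict(); print("dense: %.3fs" % (time.time() - t0))
#
# with the dense timing placed before clf.sparsify() and the sparse equivalent
# after it, since sparsify() replaces clf.coef_ in place and would otherwise
# skew the "dense" measurement. Timings will not match the line-by-line output
# shown in the module docstring.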
| bsd-3-clause |
google/trimmed_match | trimmed_match/design/util.py | 1 | 22545 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities functions to manipulate the data in the colab."""
import datetime
import itertools
import operator
from typing import List, Optional
import dataclasses
import numpy as np
import pandas as pd
import pandas.io.formats.style as style
from scipy import stats
from trimmed_match.design import common_classes
TimeWindow = common_classes.TimeWindow
FormatOptions = common_classes.FormatOptions
_operator_functions = {'>': operator.gt,
'<': operator.lt,
'<=': operator.le,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne}
_inverse_op = {'<': '>', '<=': '>=', '>': '<', '>=': '<=', '=': '!='}
@dataclasses.dataclass
class CalculateMinDetectableIroas:
"""Class for the calculation of the minimum detectable iROAS.
Hypothesis testing for H0: iROAS=0 vs H1: iROAS>=min_detectable_iroas based
on one sample X which follows a normal distribution with mean iROAS (unknown)
and standard deviation rmse (known).
Typical usage example:
calc_min_detectable_iroas = CalculateMinDetectableIroas(0.1, 0.9)
min_detectable_iroas = calc_min_detectable_iroas.at(2.0)
"""
# chance of rejecting H0 incorrectly when H0 holds.
significance_level: float = 0.1
# chance of rejecting H0 correctly when H1 holds.
power_level: float = 0.9
# minimum detectable iroas at rmse=1.
rmse_multiplier: float = dataclasses.field(init=False)
def __post_init__(self):
"""Calculates rmse_multiplier.
Raises:
ValueError: if significance_level or power_level is not in (0, 1).
"""
if self.significance_level <= 0 or self.significance_level >= 1.0:
raise ValueError('significance_level must be in (0, 1), but got '
f'{self.significance_level}.')
if self.power_level <= 0 or self.power_level >= 1.0:
raise ValueError('power_level must be in (0, 1), but got '
f'{self.power_level}.')
self.rmse_multiplier = (
stats.norm.ppf(self.power_level) +
stats.norm.ppf(1 - self.significance_level))
def at(self, rmse: float) -> float:
"""Calculates min_detectable_iroas at the specified rmse."""
return rmse * self.rmse_multiplier
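# Added illustration (not in the original source): with the defaults above the
# multiplier is stats.norm.ppf(0.9) + stats.norm.ppf(1 - 0.1) ~= 1.2816 + 1.2816
# ~= 2.563, so the docstring example evaluates to roughly
#   >>> CalculateMinDetectableIroas(0.1, 0.9).at(2.0)  # doctest: +SKIP
#   5.13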
def find_days_to_exclude(
dates_to_exclude: List[str]) -> List[TimeWindow]:
"""Returns a list of time windows to exclude from a list of days and weeks.
Args:
dates_to_exclude: a List of strings with format indicating a single day as
'2020/01/01' (YYYY/MM/DD) or an entire time period as
'2020/01/01 - 2020/02/01' (indicating start and end date of the time period)
Returns:
days_exclude: a List of TimeWindows obtained from the list in input.
"""
days_exclude = []
for x in dates_to_exclude:
tmp = x.split('-')
if len(tmp) == 1:
try:
days_exclude.append(
TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[0])))
except ValueError:
raise ValueError(f'Cannot convert the string {tmp[0]} to a valid date.')
elif len(tmp) == 2:
try:
days_exclude.append(
TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[1])))
except ValueError:
raise ValueError(
f'Cannot convert the strings in {tmp} to a valid date.')
else:
raise ValueError(f'The input {tmp} cannot be interpreted as a single' +
' day or a time window')
return days_exclude
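# Added example (illustrative): single days and ranges can be mixed, e.g.
#   >>> find_days_to_exclude(['2020/10/10', '2020/11/10 - 2020/11/11'])  # doctest: +SKIP
# returns two TimeWindows, one spanning a single day and one spanning two days;
# expand_time_windows() below turns such windows back into individual pd.Timestamps.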
def expand_time_windows(periods: List[TimeWindow]) -> List[pd.Timestamp]:
"""Return a list of days to exclude from a list of TimeWindows.
Args:
periods: List of time windows (first day, last day).
Returns:
days_exclude: a List of days obtained by expanding the list in input.
"""
days_exclude = []
for window in periods:
days_exclude += pd.date_range(window.first_day, window.last_day, freq='D')
return list(set(days_exclude))
def overlap_percent(dates_left: List['datetime.datetime'],
dates_right: List['datetime.datetime']) -> float:
"""Find the size of the intersections of two arrays, relative to the first array.
Args:
dates_left: List of datetime.datetime
dates_right: List of datetime.datetime
Returns:
percentage: the percentage of elements of dates_right that also appear in
dates_left
"""
intersection = np.intersect1d(dates_left, dates_right)
percentage = 100 * len(intersection) / len(dates_right)
return percentage
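# Added example (illustrative): if dates_right has 2 elements and exactly one of
# them also appears in dates_left, overlap_percent(dates_left, dates_right)
# returns 50.0; the percentage is always relative to the second argument.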
def check_time_periods(geox_data: pd.DataFrame,
start_date_eval: pd.Timestamp,
start_date_aa_test: pd.Timestamp,
experiment_duration_weeks: int,
frequency: str) -> bool:
"""Checks that the geox_data contains the data for the two periods.
Check that the geox_data contains all observations during the evaluation and
AA test periods to guarantee that the experiment lasts exactly a certain
number of days/weeks, depending on the frequency of the data (daily/weekly).
Args:
geox_data: pd.Dataframe with at least the columns (date, geo).
start_date_eval: start date of the evaluation period.
start_date_aa_test: start date of the aa test period.
experiment_duration_weeks: int, length of the experiment in weeks.
frequency: str indicating the frequency of the time series. It should be one
of 'infer', 'D', 'W'.
Returns:
bool: a bool, True if the time periods specified pass all the checks
Raises:
ValueError: if part of the evaluation or AA test period are shorter than
experiment_duration (either weeks or days).
"""
if frequency not in ['infer', 'D', 'W']:
raise ValueError(
f'frequency should be one of ["infer", "D", "W"], got {frequency}')
if frequency == 'infer':
tmp = geox_data.copy().set_index(['date', 'geo'])
frequency = infer_frequency(tmp, 'date', 'geo')
if frequency == 'W':
frequency = '7D'
number_of_observations = experiment_duration_weeks
else:
number_of_observations = 7 * experiment_duration_weeks
freq_str = 'weeks' if frequency == '7D' else 'days'
missing_eval = find_missing_dates(geox_data, start_date_eval,
experiment_duration_weeks,
number_of_observations, frequency)
if missing_eval:
raise ValueError(
(f'The evaluation period contains the following {freq_str} ' +
f'{missing_eval} for which we do not have data.'))
missing_aa_test = find_missing_dates(geox_data, start_date_aa_test,
experiment_duration_weeks,
number_of_observations, frequency)
if missing_aa_test:
raise ValueError((f'The AA test period contains the following {freq_str} ' +
f'{missing_aa_test} for which we do not have data.'))
return True
def find_missing_dates(geox_data: pd.DataFrame, start_date: pd.Timestamp,
period_duration_weeks: int,
number_of_observations: int,
frequency: str) -> List[str]:
"""Find missing observations in a time period.
Args:
geox_data: pd.Dataframe with at least the columns (date, geo).
start_date: start date of the evaluation period.
period_duration_weeks: int, length of the period in weeks.
number_of_observations: expected number of time points.
frequency: str or pd.DateOffset indicating the frequency of the time series.
Returns:
missing: a list of strings, containing the dates for which data are missing
in geox_data.
"""
days = datetime.timedelta(days=7 * period_duration_weeks - 1)
period_dates = ((geox_data['date'] >= start_date) &
(geox_data['date'] <= start_date + days))
days_in_period = geox_data.loc[
period_dates, 'date'].drop_duplicates().dt.strftime('%Y-%m-%d').to_list()
missing = np.array([])
if len(days_in_period) != number_of_observations:
expected_observations = list(
pd.date_range(start_date, start_date + days,
freq=frequency).strftime('%Y-%m-%d'))
missing = set(expected_observations) - set(days_in_period)
return sorted(missing)
def infer_frequency(data: pd.DataFrame, date_index: str,
series_index: str) -> str:
"""Infers frequency of data from pd.DataFrame with multiple indices.
Infers frequency of data from pd.DataFrame with two indices, one for the slice
name and one for the date-time.
Example:
df = pd.DataFrame({'date': ['2020-10-10', '2020-10-11'], 'geo': [1, 1],
'response': [10, 20]})
df.set_index(['geo', 'date'], inplace=True)
infer_frequency(df, 'date', 'geo')
Args:
data: a pd.DataFrame for which frequency needs to be inferred.
date_index: string containing the name of the time index.
series_index: string containing the name of the series index.
Returns:
A str, either 'D' or 'W' indicating the most likely frequency inferred
from the data.
Raises:
ValueError: if it is not possible to infer frequency of sampling from the
provided pd.DataFrame.
"""
data = data.sort_values(by=[date_index, series_index])
# Infer the most likely frequency for each series_index
series_names = data.index.get_level_values(series_index).unique().tolist()
series_frequencies = []
for series in series_names:
observed_times = data.iloc[data.index.get_level_values(series_index) ==
series].index.get_level_values(date_index)
n_steps = len(observed_times)
if n_steps > 1:
time_diffs = (
observed_times[1:n_steps] -
observed_times[0:(n_steps - 1)]).astype('timedelta64[D]').values
modal_frequency, _ = np.unique(time_diffs, return_counts=True)
series_frequencies.append(modal_frequency[0])
if not series_frequencies:
raise ValueError(
'At least one series with more than one observation must be provided.')
if series_frequencies.count(series_frequencies[0]) != len(series_frequencies):
raise ValueError(
'The provided time series seem to have irregular frequencies.')
try:
frequency = {
1: 'D',
7: 'W'
}[series_frequencies[0]]
except KeyError:
raise ValueError('Frequency could not be identified. Got %d days.' %
series_frequencies[0])
return frequency
def human_readable_number(number: float) -> str:
"""Print a large number in a readable format.
Return a readable format for a number, e.g. 123 million becomes 123M.
Args:
number: a float to be printed in human readable format.
Returns:
readable_number: a string containing the formatted number.
"""
number = float('{:.3g}'.format(number))
magnitude = 0
while abs(number) >= 1000 and magnitude < 4:
magnitude += 1
number /= 1000.0
readable_number = '{}{}'.format('{:f}'.format(number).rstrip('0').rstrip('.'),
['', 'K', 'M', 'B', 'tn'][magnitude])
return readable_number
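# Added examples (computed from the logic above):
#   human_readable_number(1500)      -> '1.5K'
#   human_readable_number(123456789) -> '123M'
# The magnitude is capped at 'tn', so very large values accumulate in that bucket.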
def change_background_row(df: pd.DataFrame, value: float, operation: str,
column: str):
"""Colors a row of a table based on the expression in input.
Color a row in:
- orange if the value of the column satisfies the expression in input
- beige if the value of the column satisfies the inverse expression in input
- green otherwise
For example, if the column has values [1, 2, 3] and we pass 'value' equal to
2, and operation '>', then
- 1 is marked in beige (1 < 2, which is the inverse expression)
- 2 is marked in green (it's not > and it's not <)
- 3 is marked in orange(3 > 2, which is the expression)
Args:
df: the table of which we want to change the background color.
value: term of comparison to be used in the expression.
operation: a string to define which operator to use, e.g. '>' or '='. For a
full list check _operator_functions.
column: name of the column to be used for the comparison
Returns:
pd.Series
"""
if _operator_functions[operation](float(df[column]), value):
return pd.Series('background-color: orange', df.index)
elif _operator_functions[_inverse_op[operation]](float(df[column]), value):
return pd.Series('background-color: beige', df.index)
else:
return pd.Series('background-color: lightgreen', df.index)
def flag_percentage_value(val, value: float, operation: str):
"""Colors a cell in red if its value satisfy the expression in input.
Colors a cell in red if the expression is true for that cell, e.g. if the
value of the cell is 10, 'value' in input is 5 and operation is '>', then we
will color the cell in red as 10 > 5.
Args:
val: value in a cell of a dataframe.
value: term of comparison used to decide the color of the cell.
operation: a string to define which operator to use, e.g. '>' or '='. For a
full list check _operator_functions.
Returns:
a str defining the color coding of the cell.
"""
if _operator_functions[operation](float(val.strip(' %')), value):
color = 'red'
else:
color = 'black'
return 'color: %s' % color
def create_output_table(results: pd.DataFrame,
total_response: float,
total_spend: float,
geo_treatment: pd.DataFrame,
budgets_for_design: List[float],
average_order_value: float,
num_geos: int,
confidence_level: float = 0.9,
power_level: float = 0.8) -> pd.DataFrame:
"""Creates the table with the output designs.
Args:
results: table with columns (num_pairs_filtered,
experiment_response, experiment_spend, spend_response_ratio, budget,
iroas, rmse, proportion_cost_in_experiment) containing the generated
design, e.g. the first output of the
function TrimmedMatchGeoXDesign.report_candidate_design.
total_response: total response for all geos (excluded as well) during the
evaluation period.
total_spend: total spend for all geos (excluded as well) during the
evaluation period.
geo_treatment: table with columns (geo, response, spend, pair) containing the
treatment geos and their overall response and spend during the evaluation
period.
budgets_for_design: list of budgets to be considered for the designs.
average_order_value: factor used to change scale from conversion count to
conversion value.
num_geos: number of geos available.
confidence_level: confidence level for the test H0: iROAS=0
vs H1: iROAS>=minimum_detectable_iroas.
power_level: level used for the power analysis.
Returns:
a pd.DataFrame with the output designs.
"""
calc_min_detectable_iroas = CalculateMinDetectableIroas(
1 - confidence_level, power_level)
designs = []
for budget in budgets_for_design:
tmp_result = results[results['budget'] == budget]
chosen_design = tmp_result.loc[tmp_result['rmse_cost_adjusted'].idxmin()]
baseline = geo_treatment.loc[
geo_treatment['pair'] > chosen_design['num_pairs_filtered'],
'response'].sum()
cost_in_experiment = geo_treatment.loc[
geo_treatment['pair'] > chosen_design['num_pairs_filtered'],
'spend'].sum()
min_detectable_iroas_raw = calc_min_detectable_iroas.at(
chosen_design['rmse'])
min_detectable_iroas = average_order_value * min_detectable_iroas_raw
min_detectable_lift = budget * 100 * min_detectable_iroas_raw / baseline
num_removed_geos = int(2 * chosen_design['num_pairs_filtered'])
num_geo_pairs = int((num_geos - num_removed_geos) / 2)
treat_control_removed = (f'{num_geo_pairs} / {num_geo_pairs} / ' +
f'{num_removed_geos}')
revenue_covered = 100 * baseline / total_response
proportion_cost_in_experiment = cost_in_experiment / total_spend
national_budget = human_readable_number(
budget / proportion_cost_in_experiment)
designs.append({
'Budget': human_readable_number(budget),
'Minimum detectable iROAS': f'{min_detectable_iroas:.3}',
'Minimum detectable lift in response': f'{min_detectable_lift:.2f} %',
'Treatment/control/excluded geos': treat_control_removed,
'Revenue covered by treatment group': f'{revenue_covered:.2f} %',
'Cost/baseline response': f'{(budget / baseline * 100):.2f} %',
'Cost if test budget is scaled nationally': national_budget
})
designs = pd.DataFrame(designs)
designs.index.rename('Design', inplace=True)
return designs
def format_table(
df: pd.DataFrame,
formatting_options: List[FormatOptions]) -> style.Styler:
"""Formats a table with the output designs.
Args:
df: a table to be formatted.
formatting_options: a list of FormatOptions, one per column to format, each
specifying the column name, the formatting function to be used and its
additional args, e.g.
FormatOptions(column='column_1', function=fnc, args={'input1': 1, 'input2': 2})
Returns:
a pandas.io.formats.style.Styler with the table formatted.
"""
for ind in range(len(formatting_options)):
tmp_options = formatting_options[ind]
if ind == 0:
# if axis is in the args, then the function should be applied on rows/cols
if 'axis' in tmp_options.args:
formatted_table = df.style.apply(tmp_options.function,
**tmp_options.args)
# apply the formatting elementwise
else:
formatted_table = df.style.applymap(tmp_options.function,
**tmp_options.args)
else:
# if axis is in the args, then the function should be applied on rows/cols
if 'axis' in tmp_options.args:
formatted_table = formatted_table.apply(tmp_options.function,
**tmp_options.args)
# apply the formatting elementwise
else:
formatted_table = formatted_table.applymap(tmp_options.function,
**tmp_options.args)
return formatted_table
def format_design_table(designs: pd.DataFrame,
minimum_detectable_iroas: float,
minimum_lift_in_response_metric: float = 10.0,
minimum_revenue_covered_by_treatment: float = 5.0):
"""Formats a table with the output designs.
Args:
designs: table with columns (Budget, Minimum detectable iROAS,
Minimum Detectable lift in response, Treatment/control/excluded geos,
Revenue covered by treatment group, Cost/baseline response,
Cost if test budget is scaled nationally) containing the output designs,
e.g. the output of the function create_output_table.
minimum_detectable_iroas: target minimum detectable iROAS used to define
the optimality of a design.
minimum_lift_in_response_metric: threshold minimum detectable lift
in percentage used to flag designs with higher detectable lift.
minimum_revenue_covered_by_treatment: value used to flag any design where the
treatment group is too small based on response.
Returns:
a pandas.io.formats.style.Styler with the table formatted.
"""
formatting_options = [
FormatOptions(
column='Minimum detectable lift in response',
function=flag_percentage_value,
args={
'value': minimum_lift_in_response_metric,
'operation': '>'
}),
FormatOptions(
column='Revenue covered by treatment group',
function=flag_percentage_value,
args={
'value': minimum_revenue_covered_by_treatment,
'operation': '<'
}),
FormatOptions(
column='Minimum detectable iROAS',
function=change_background_row,
args={
'value': minimum_detectable_iroas,
'operation': '>',
'axis': 1
})
]
return format_table(designs, formatting_options)
def check_input_data(
data: pd.DataFrame,
numeric_columns_to_impute: Optional[List[str]] = None) -> pd.DataFrame:
"""Returns data to be analysed using Trimmed Match with data imputation.
Args:
data: data frame with columns (date, geo) and any column specified in
numeric_columns_to_impute, which should contain at least the columns with
response and spend information if they have a different name than
'response' and 'cost', respectively.
numeric_columns_to_impute: list of columns for which data imputation must be
performed.
Returns:
data frame with columns (date, geo, response, cost) and imputed missing
data.
Raises:
ValueError: if one of the mandatory columns is missing.
"""
numeric_columns_to_impute = numeric_columns_to_impute or ['response', 'cost']
mandatory_columns = set(['date', 'geo'] + numeric_columns_to_impute)
if not mandatory_columns.issubset(data.columns):
raise ValueError('The mandatory columns ' +
f'{mandatory_columns - set(data.columns)} are missing ' +
'from the input data.')
data['date'] = pd.to_datetime(data['date'])
for column in ['geo'] + numeric_columns_to_impute:
try:
data[column] = pd.to_numeric(data[column])
except (ValueError, TypeError):
raise ValueError(f'Unable to convert column {column} to numeric.')
geos_and_dates = pd.DataFrame(
itertools.product(data['date'].unique(), data['geo'].unique()),
columns=['date', 'geo'])
data = pd.merge(
geos_and_dates, data, on=['date', 'geo'],
how='left').fillna(dict([
(x, 0) for x in numeric_columns_to_impute
])).sort_values(by=['date', 'geo']).reset_index(drop=True)
return data
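# Added example (illustrative): if the input has geos {1, 2} and dates
# {2020-01-01, 2020-01-02} but geo 2 has no row on 2020-01-02, the merge above
# adds that (date, geo) combination with response=0 and cost=0, so every geo
# ends up with exactly one row per date.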
| apache-2.0 |
winklerand/pandas | asv_bench/benchmarks/gil.py | 7 | 11003 | from .pandas_vb_common import *
from pandas.core.algorithms import take_1d
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
from pandas._libs import algos
except ImportError:
from pandas import algos
try:
from pandas.util.testing import test_parallel
have_real_test_parallel = True
except ImportError:
have_real_test_parallel = False
def test_parallel(num_threads=1):
def wrapper(fname):
return fname
return wrapper
class NoGilGroupby(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
np.random.seed(1234)
self.size = 2 ** 22
self.ngroups = 100
self.data = Series(np.random.randint(0, self.ngroups, size=self.size))
if (not have_real_test_parallel):
raise NotImplementedError
@test_parallel(num_threads=2)
def _pg2_count(self):
self.df.groupby('key')['data'].count()
def time_count_2(self):
self._pg2_count()
@test_parallel(num_threads=2)
def _pg2_last(self):
self.df.groupby('key')['data'].last()
def time_last_2(self):
self._pg2_last()
@test_parallel(num_threads=2)
def _pg2_max(self):
self.df.groupby('key')['data'].max()
def time_max_2(self):
self._pg2_max()
@test_parallel(num_threads=2)
def _pg2_mean(self):
self.df.groupby('key')['data'].mean()
def time_mean_2(self):
self._pg2_mean()
@test_parallel(num_threads=2)
def _pg2_min(self):
self.df.groupby('key')['data'].min()
def time_min_2(self):
self._pg2_min()
@test_parallel(num_threads=2)
def _pg2_prod(self):
self.df.groupby('key')['data'].prod()
def time_prod_2(self):
self._pg2_prod()
@test_parallel(num_threads=2)
def _pg2_sum(self):
self.df.groupby('key')['data'].sum()
def time_sum_2(self):
self._pg2_sum()
@test_parallel(num_threads=4)
def _pg4_sum(self):
self.df.groupby('key')['data'].sum()
def time_sum_4(self):
self._pg4_sum()
def time_sum_4_notp(self):
for i in range(4):
self.df.groupby('key')['data'].sum()
def _f_sum(self):
self.df.groupby('key')['data'].sum()
@test_parallel(num_threads=8)
def _pg8_sum(self):
self._f_sum()
def time_sum_8(self):
self._pg8_sum()
def time_sum_8_notp(self):
for i in range(8):
self._f_sum()
@test_parallel(num_threads=2)
def _pg2_var(self):
self.df.groupby('key')['data'].var()
def time_var_2(self):
self._pg2_var()
# get groups
def _groups(self):
self.data.groupby(self.data).groups
@test_parallel(num_threads=2)
def _pg2_groups(self):
self._groups()
def time_groups_2(self):
self._pg2_groups()
@test_parallel(num_threads=4)
def _pg4_groups(self):
self._groups()
def time_groups_4(self):
self._pg4_groups()
@test_parallel(num_threads=8)
def _pg8_groups(self):
self._groups()
def time_groups_8(self):
self._pg8_groups()
class nogil_take1d_float64(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
if (not have_real_test_parallel):
raise NotImplementedError
self.N = 10000000.0
self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })
self.indexer = np.arange(100, (len(self.df) - 100))
def time_nogil_take1d_float64(self):
self.take_1d_pg2_float64()
@test_parallel(num_threads=2)
def take_1d_pg2_int64(self):
take_1d(self.df.int64.values, self.indexer)
@test_parallel(num_threads=2)
def take_1d_pg2_float64(self):
take_1d(self.df.float64.values, self.indexer)
class nogil_take1d_int64(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
if (not have_real_test_parallel):
raise NotImplementedError
self.N = 10000000.0
self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })
self.indexer = np.arange(100, (len(self.df) - 100))
def time_nogil_take1d_int64(self):
self.take_1d_pg2_int64()
@test_parallel(num_threads=2)
def take_1d_pg2_int64(self):
take_1d(self.df.int64.values, self.indexer)
@test_parallel(num_threads=2)
def take_1d_pg2_float64(self):
take_1d(self.df.float64.values, self.indexer)
class nogil_kth_smallest(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
np.random.seed(1234)
self.N = 10000000
self.k = 500000
self.a = np.random.randn(self.N)
self.b = self.a.copy()
self.kwargs_list = [{'arr': self.a}, {'arr': self.b}]
def time_nogil_kth_smallest(self):
@test_parallel(num_threads=2, kwargs_list=self.kwargs_list)
def run(arr):
algos.kth_smallest(arr, self.k)
run()
class nogil_datetime_fields(object):
goal_time = 0.2
def setup(self):
self.N = 100000000
self.dti = pd.date_range('1900-01-01', periods=self.N, freq='T')
self.period = self.dti.to_period('D')
if (not have_real_test_parallel):
raise NotImplementedError
def time_datetime_field_year(self):
@test_parallel(num_threads=2)
def run(dti):
dti.year
run(self.dti)
def time_datetime_field_day(self):
@test_parallel(num_threads=2)
def run(dti):
dti.day
run(self.dti)
def time_datetime_field_daysinmonth(self):
@test_parallel(num_threads=2)
def run(dti):
dti.days_in_month
run(self.dti)
def time_datetime_field_normalize(self):
@test_parallel(num_threads=2)
def run(dti):
dti.normalize()
run(self.dti)
def time_datetime_to_period(self):
@test_parallel(num_threads=2)
def run(dti):
dti.to_period('S')
run(self.dti)
def time_period_to_datetime(self):
@test_parallel(num_threads=2)
def run(period):
period.to_timestamp()
run(self.period)
class nogil_rolling_algos_slow(object):
goal_time = 0.2
def setup(self):
self.win = 100
np.random.seed(1234)
self.arr = np.random.rand(100000)
if (not have_real_test_parallel):
raise NotImplementedError
def time_nogil_rolling_median(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_median(arr, win)
run(self.arr, self.win)
class nogil_rolling_algos_fast(object):
goal_time = 0.2
def setup(self):
self.win = 100
np.random.seed(1234)
self.arr = np.random.rand(1000000)
if (not have_real_test_parallel):
raise NotImplementedError
def time_nogil_rolling_mean(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_mean(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_min(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_min(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_max(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_max(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_var(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_var(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_skew(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_skew(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_kurt(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_kurt(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_std(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_std(arr, win)
run(self.arr, self.win)
class nogil_read_csv(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
# Using the values
self.df = DataFrame(np.random.randn(10000, 50))
self.df.to_csv('__test__.csv')
self.rng = date_range('1/1/2000', periods=10000)
self.df_date_time = DataFrame(np.random.randn(10000, 50), index=self.rng)
self.df_date_time.to_csv('__test_datetime__.csv')
self.df_object = DataFrame('foo', index=self.df.index, columns=self.create_cols('object'))
self.df_object.to_csv('__test_object__.csv')
def create_cols(self, name):
return [('%s%03d' % (name, i)) for i in range(5)]
@test_parallel(num_threads=2)
def pg_read_csv(self):
read_csv('__test__.csv', sep=',', header=None, float_precision=None)
def time_read_csv(self):
self.pg_read_csv()
@test_parallel(num_threads=2)
def pg_read_csv_object(self):
read_csv('__test_object__.csv', sep=',')
def time_read_csv_object(self):
self.pg_read_csv_object()
@test_parallel(num_threads=2)
def pg_read_csv_datetime(self):
read_csv('__test_datetime__.csv', sep=',', header=None)
def time_read_csv_datetime(self):
self.pg_read_csv_datetime()
class nogil_factorize(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
np.random.seed(1234)
self.strings = tm.makeStringIndex(100000)
def factorize_strings(self):
pd.factorize(self.strings)
@test_parallel(num_threads=4)
def _pg_factorize_strings_4(self):
self.factorize_strings()
def time_factorize_strings_4(self):
for i in range(2):
self._pg_factorize_strings_4()
@test_parallel(num_threads=2)
def _pg_factorize_strings_2(self):
self.factorize_strings()
def time_factorize_strings_2(self):
for i in range(4):
self._pg_factorize_strings_2()
def time_factorize_strings(self):
for i in range(8):
self.factorize_strings()
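# Added note: these classes follow the airspeed velocity (asv) convention
# implied by the asv_bench/ path -- ``setup`` builds the data and every
# ``time_*`` method is what gets timed; the ``@test_parallel`` decorator runs
# the wrapped body in several threads so that GIL-releasing code paths in
# pandas are exercised.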
| bsd-3-clause |
cxxgtxy/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 137 | 2219 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/sparse/list.py | 16 | 3695 | import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import pprint_thing
from pandas.sparse.array import SparseArray
import pandas._sparse as splib
class SparseList(PandasObject):
"""
Data structure for accumulating data to be converted into a
SparseArray. Has similar API to the standard Python list
Parameters
----------
data : scalar or array-like
fill_value : scalar, default NaN
"""
def __init__(self, data=None, fill_value=np.nan):
self.fill_value = fill_value
self._chunks = []
if data is not None:
self.append(data)
def __unicode__(self):
contents = '\n'.join(repr(c) for c in self._chunks)
return '%s\n%s' % (object.__repr__(self), pprint_thing(contents))
def __len__(self):
return sum(len(c) for c in self._chunks)
def __getitem__(self, i):
if i < 0:
if i + len(self) < 0: # pragma: no cover
raise ValueError('%d out of range' % i)
i += len(self)
passed = 0
j = 0
while i >= passed + len(self._chunks[j]):
passed += len(self._chunks[j])
j += 1
return self._chunks[j][i - passed]
def __setitem__(self, i, value):
raise NotImplementedError
@property
def nchunks(self):
return len(self._chunks)
@property
def is_consolidated(self):
return self.nchunks == 1
def consolidate(self, inplace=True):
"""
Internally consolidate chunks of data
Parameters
----------
inplace : boolean, default True
Modify the calling object instead of constructing a new one
Returns
-------
splist : SparseList
If inplace=False, new object, otherwise reference to existing
object
"""
if not inplace:
result = self.copy()
else:
result = self
if result.is_consolidated:
return result
result._consolidate_inplace()
return result
def _consolidate_inplace(self):
new_values = np.concatenate([c.sp_values for c in self._chunks])
new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])
new_arr = SparseArray(new_values, sparse_index=new_index,
fill_value=self.fill_value)
self._chunks = [new_arr]
def copy(self):
"""
Return copy of the list
Returns
-------
new_list : SparseList
"""
new_splist = SparseList(fill_value=self.fill_value)
new_splist._chunks = list(self._chunks)
return new_splist
def to_array(self):
"""
Return SparseArray from data stored in the SparseList
Returns
-------
sparr : SparseArray
"""
self.consolidate(inplace=True)
return self._chunks[0]
def append(self, value):
"""
Append element or array-like chunk of data to the SparseList
Parameters
----------
value: scalar or array-like
"""
if np.isscalar(value):
value = [value]
sparr = SparseArray(value, fill_value=self.fill_value)
self._chunks.append(sparr)
self._consolidated = False
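# Added usage sketch (based only on the methods defined above):
#   >>> splist = SparseList(fill_value=0)
#   >>> splist.append([1, 0, 0, 2])   # array-like chunk
#   >>> splist.append(5)              # scalar
#   >>> len(splist)
#   5
#   >>> arr = splist.to_array()       # consolidates the chunks into one SparseArray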
def _concat_sparse_indexes(indexes):
all_indices = []
total_length = 0
for index in indexes:
# increment by offset
inds = index.to_int_index().indices + total_length
all_indices.append(inds)
total_length += index.length
return splib.IntIndex(total_length, np.concatenate(all_indices))
| gpl-2.0 |
B3AU/waveTree | sklearn/tests/test_kernel_approximation.py | 6 | 5945 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
"""test that AdditiveChi2Sampler approximates kernel on random data"""
# compute exact kernel
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
def test_skewed_chi2_sampler():
"""test that RBFSampler approximates kernel on random data"""
# compute exact kernel
c = 0.03
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
"""test that RBFSampler approximates kernel on random data"""
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
def test_input_validation():
"""Regression test: kernel approx. transformers should work on lists
No assertions; the old versions would simply crash
"""
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_poly_kernel_params():
"""Non-regression: Nystroem should pass other parameters beside gamma."""
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
"""Test Nystroem on a callable."""
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
redes-usach/morse-decoder | src/decoder.py | 1 | 2267 | import numpy as np
from scipy.io.wavfile import read,write
import matplotlib.pyplot as plt
from math import fabs
from translator import *
"""
CONVERTS THE AUDIO INTO ZEROS AND ONES
Input: audio data and sampling rate
Output: numpy array of zeros and ones
"""
def separate_audio(data,rate):
lista = []
data = np.absolute(data)
max_value = np.amax(data)
media = data.mean()
threshold = ((max_value + media)/2)*1.1
for i in data:
if i>= threshold:
lista.append(1)
else:
lista.append(0)
a = np.array(lista)
return a
"""
COMPUTES THE PER-WINDOW AVERAGE TO DECIDE WHERE THERE IS SOUND
Input: audio data and sampling rate
Output: result of calling count() on the binarized signal
"""
def ones_audio(data,rate):
piece = 2*round(len(data)/rate)
perfect=separate_audio(data,rate)
lista = []
uno = np.array(lista)
cont = 0
prom = 0
for i in perfect:
cont = cont + 1
prom = prom + i
if cont == piece:
cont = 0
prom = prom/piece
if prom > 0:
uno = np.append(uno,np.ones(piece))
else:
uno = np.append(uno,np.zeros(piece))
prom = 0
return count(uno)
"""
COMPUTES THE MAX AND MIN LENGTH OF CONSECUTIVE RUNS OF ZEROS AND ONES
Input: audio data
Output: call to the detector function with the data, the max and min runs of ones, and the min run of zeros
"""
def count(data):
min0 = 9999999
max0 = 0
min1 = 9999999
max1 = 0
count = 1
for i in range(1,len(data)):
if data[i]==data[i-1]:
count = count + 1
else:
if data[i]==0:
if count >= max1:
max1 = count
if count <= min1:
min1 = count
else:
if count >= max0:
max0 = count
if count <= min0:
min0 = count
count = 1
return detector(data,max1,min1,min0)
"""
BUILDS THE CHARACTER SEQUENCE OF DOTS AND DASHES FROM THE MAX AND MIN RUNS
Input: the data, the max and min runs of ones, and the min run of zeros
Output: string with the message sequence in Morse code
"""
def detector(data,max1,min1,min0):
morse = ""
count = 1
for i in range(1,len(data)-1):
if data[i]==data[i+1]:
count = count + 1
else:
if data[i-1]==1:
dif_max1 = fabs(max1-count)
dif_min1 = fabs(min1-count)
if dif_max1 > dif_min1:
morse = morse + "."
else:
morse = morse + "-"
else:
if count > 2*min0:
morse = morse + " "
count = 0
return morse
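# Added end-to-end sketch (illustrative; 'audio.wav' is an assumed file name and
# the text translation step lives in the imported translator module, whose
# function names are not shown in this file):
#   rate, data = read('audio.wav')
#   morse = ones_audio(data, rate)   # e.g. '.... --- .-.. .-'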
| lgpl-3.0 |
0asa/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
ilo10/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
WarrenWeckesser/numpy | tools/refguide_check.py | 3 | 37844 | #!/usr/bin/env python3
"""
refguide_check.py [OPTIONS] [-- ARGS]
- Check for a NumPy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
- Check docstring examples
- Check example blocks in RST files
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings::
$ python refguide_check.py --doctests ma
or in RST-based documentations::
$ python refguide_check.py --rst docs
"""
import copy
import doctest
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
import docutils.core
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from docutils.parsers.rst import directives
from pkg_resources import parse_version
import sphinx
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
SKIPBLOCK = doctest.register_optionflag('SKIPBLOCK')
if parse_version(sphinx.__version__) >= parse_version('1.5'):
# Enable specific Sphinx directives
from sphinx.directives.other import SeeAlso, Only
directives.register_directive('seealso', SeeAlso)
directives.register_directive('only', Only)
else:
# Remove sphinx directives that don't run without Sphinx environment.
# Sphinx < 1.5 installs all directives on import...
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "numpy"
PUBLIC_SUBMODULES = [
'core',
'doc.structured_arrays',
'f2py',
'linalg',
'lib',
'lib.recfunctions',
'fft',
'ma',
'polynomial',
'matrixlib',
'random',
'testing',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
# cases where NumPy docstrings import things from SciPy:
'numpy.lib.vectorize',
'numpy.random.standard_gamma',
'numpy.random.gamma',
'numpy.random.vonmises',
'numpy.random.power',
'numpy.random.zipf',
# remote / local file IO with DataSource is problematic in doctest:
'numpy.lib.DataSource',
'numpy.lib.Repository',
])
# Skip non-numpy RST files, historical release notes
# Any single-directory exact match will skip the directory and all subdirs.
# Any exact match (like 'doc/release') will scan subdirs but skip files in
# the matched directory.
# Any filename will skip that file
RST_SKIPLIST = [
'scipy-sphinx-theme',
'sphinxext',
'neps',
'changelog',
'doc/release',
'doc/source/release',
'c-info.ufunc-tutorial.rst',
'c-info.python-as-glue.rst',
'f2py.getting-started.rst',
'arrays.nditer.cython.rst',
]
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
# NOTE: should NumPy have a better match between autosummary
# listings and __all__? For now, TR isn't convinced this is a
# priority -- focus on just getting docstrings executed / correct
r'numpy\.*',
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):
REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
Parameters
----------
path: str or None
cwd: str or None
Returns
-------
str
Relative path or absolute path based on current working directory
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
"""
Finds the occurrences of function names, special directives like data
and functions and scipy constants in the docstrings of `module`. The
following patterns are searched for:
* 3 spaces followed by function name, and maybe some spaces, some
dashes, and an explanation; only function names listed in
refguide are formatted like this (mostly, there may be some false
      positives)
* special directives, such as data and function
* (scipy.constants only): quoted list
The `names_dict` is updated by reference and accessible in calling method
Parameters
----------
module : ModuleType
The module, whose docstrings is to be searched
names_dict : dict
Dictionary which contains module name as key and a set of found
function names and directives as value
Returns
-------
None
"""
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""
Return a copy of the __all__ dict with irrelevant items removed.
Parameters
----------
module : ModuleType
The module whose __all__ dict has to be processed
Returns
-------
    not_deprecated : list
        List of names of the non-deprecated callables and other objects
    deprecated : list
        List of names of the deprecated callables
    others : set
        Set of the remaining attribute names of the module
"""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
if not all_dict:
# Must be a pure documentation module like doc.structured_arrays
all_dict.append('__doc__')
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""
Return sets of objects from all_dict.
Will return three sets:
{in module_name.__all__},
{in REFGUIDE*},
and {missing from others}
Parameters
----------
all_dict : list
List of non deprecated sub modules for module_name
others : list
List of sub modules for module_name
names : set
Set of function names or special directives present in
docstring of module_name
    module_name : str
Returns
-------
only_all : set
only_ref : set
missing : set
"""
only_all = set()
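    # A name in __all__ that is absent from the refguide listing is reported,
    # unless it matches one of the REFGUIDE_AUTOSUMMARY_SKIPLIST patterns:
    # the for/else below adds the name only when no skip pattern matched.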
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
"""
    Check if the callable `f` is deprecated
    Parameters
    ----------
    f : callable
Returns
-------
bool
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except Exception:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
"""
Check that `all_dict` is consistent with the `names` in `module_name`
For instance, that there are no deprecated or extra objects.
Parameters
----------
all_dict : list
names : set
deprecated : list
others : list
    module_name : str
dots : bool
Whether to print a dot for each check
Returns
-------
list
List of [(name, success_flag, output)...]
"""
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
output += "\nThis issue can be fixed by adding these objects to\n"
output += "the function listing in __init__.py for this module\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
output += "\nThis issue should likely be fixed by removing these objects\n"
output += "from the function listing in __init__.py for this module\n"
output += "or adding them to __all__.\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
"""
Validates the doc string in a snippet of documentation
`text` from file `name`
Parameters
----------
text : str
Docstring text
name : str
File name for which the doc string is to be validated
dots : bool
Whether to print a dot symbol for each check
Returns
-------
(bool, str)
"""
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data', 'attr',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor', 'term', 'c:member',
'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Parameters
----------
module : ModuleType
names : set
Returns
-------
result : list
List of [(module_name, success_flag, output),...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except Exception:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'numpy': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float32': np.float32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,
'StringIO': io.StringIO,
}
class DTRunner(doctest.DocTestRunner):
"""
The doctest runner
"""
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
"""
Check the docstrings
"""
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary",
"# uninitialized", "#uninitialized"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
'.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = CHECK_NAMESPACE
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except Exception:
# Maybe we're printing a numpy array? This produces invalid python
# code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
# values. So, reinsert commas and retry.
            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
# (2) n-dim arrays with n > 1
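            # For example, a printed numpy array "[0 1 2]" is rewritten as
            # "0, 1, 2", which eval() can parse into a tuple for the numeric
            # comparison further below.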
s_want = want.strip()
s_got = got.strip()
cond = (s_want.startswith("[") and s_want.endswith("]") and
s_got.startswith("[") and s_got.endswith("]"))
if cond:
s_want = ", ".join(s_want[1:-1].split())
s_got = ", ".join(s_got[1:-1].split())
return self.check_output(s_want, s_got, optionflags)
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = (r'[\w\d_]+\(' +
', '.join([r'[\w\d_]+=(.+)']*num) +
r'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""
Run modified doctests for the set of `tests`.
Parameters
----------
tests: list
full_name : str
verbose : bool
    doctest_warnings : bool
Returns
-------
tuple(bool, list)
Tuple of (success, output)
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = io.StringIO(newline='')
success = True
# Redirect stderr to the stdout or output
tmp_stderr = sys.stdout if doctest_warnings else output
@contextmanager
def temp_cwd():
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
yield tmpdir
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
# Run tests, trying to restore global state afterward
cwd = os.getcwd()
with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \
redirect_stderr(tmp_stderr):
# try to ensure random seed is NOT reproducible
np.random.seed(None)
ns = {}
for t in tests:
# We broke the tests up into chunks to try to avoid PSEUDOCODE
# This has the unfortunate side effect of restarting the global
# namespace for each test chunk, so variables will be "lost" after
# a chunk. Chain the globals to avoid this
t.globs.update(ns)
t.filename = short_path(t.filename, cwd)
# Process our options
if any([SKIPBLOCK in ex.options for ex in t.examples]):
continue
fails, successes = runner.run(t, out=output.write, clear_globs=False)
if fails > 0:
success = False
ns = t.globs
output.seek(0)
return success, output.read()
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""
Check code in docstrings of the module's public symbols.
Parameters
----------
module : ModuleType
        The module whose docstrings are to be checked
verbose : bool
Should the result be verbose
ns : dict
Name space of module
dots : bool
doctest_warnings : bool
Returns
-------
results : list
List of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except Exception:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""
Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Parameters
----------
fname : str
File name
verbose : bool
ns : dict
Name space
dots : bool
doctest_warnings : bool
Returns
-------
list
List of [(item_name, success_flag, output), ...]
Notes
-----
refguide can be signalled to skip testing code by adding
``#doctest: +SKIP`` to the end of the line. If the output varies or is
    random, add ``# may vary`` or ``# random`` to the comment. For example
>>> plt.plot(...) # doctest: +SKIP
>>> random.randint(0,10)
5 # random
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not being doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
if ns is None:
ns = CHECK_NAMESPACE
results = []
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
with open(fname, encoding='utf-8') as f:
text = f.read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,' # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
base_line_no = 0
for part in text.split('\n\n'):
try:
tests = parser.get_doctest(part, ns, fname, fname, base_line_no)
except ValueError as e:
if e.args[0].startswith('line '):
# fix line number since `parser.get_doctest` does not increment
# the reported line number by base_line_no in the error message
parts = e.args[0].split()
parts[1] = str(int(parts[1]) + base_line_no)
e.args = (' '.join(parts),) + e.args[1:]
raise
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts.append((part, base_line_no))
base_line_no += part.count('\n') + 2
# Reassemble the good bits and doctest them:
tests = []
for good_text, line_no in good_parts:
tests.append(parser.get_doctest(good_text, ns, fname, fname, line_no))
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def iter_included_files(base_path, verbose=0, suffixes=('.rst',)):
"""
Generator function to walk `base_path` and its subdirectories, skipping
files or directories in RST_SKIPLIST, and yield each file with a suffix in
`suffixes`
Parameters
----------
base_path : str
Base path of the directory to be processed
verbose : int
suffixes : tuple
Yields
------
    path : str
        Path of each file found under `base_path` with a suffix in `suffixes`
"""
if os.path.exists(base_path) and os.path.isfile(base_path):
yield base_path
for dir_name, subdirs, files in os.walk(base_path, topdown=True):
if dir_name in RST_SKIPLIST:
if verbose > 0:
sys.stderr.write('skipping files in %s' % dir_name)
files = []
for p in RST_SKIPLIST:
if p in subdirs:
if verbose > 0:
sys.stderr.write('skipping %s and subdirs' % p)
subdirs.remove(p)
for f in files:
if (os.path.splitext(f)[1] in suffixes and
f not in RST_SKIPLIST):
yield os.path.join(dir_name, f)
def check_documentation(base_path, results, args, dots):
"""
Check examples in any *.rst located inside `base_path`.
Add the output to `results`.
See Also
--------
check_doctests_testfile
"""
for filename in iter_included_files(base_path, args.verbose):
if dots:
sys.stderr.write(filename + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(
filename,
(args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
# stub out a "module" which is needed when reporting the result
def scratch():
pass
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write('\n')
sys.stderr.flush()
def init_matplotlib():
"""
Check feasibility of matplotlib initialization.
"""
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
"""
    Validates the docstrings of a pre-decided set of
    modules for errors and docstring standards.
"""
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true",
help="Run also doctests on ")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--rst", nargs='?', const='doc', default=None,
help=("Run also examples from *rst files "
"discovered walking the directory(s) specified, "
"defaults to 'doc'"))
args = parser.parse_args(argv)
modules = []
names_dict = {}
if not args.module_names:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in module_names:
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
dots = True
success = True
results = []
errormsgs = []
if args.doctests or args.rst:
init_matplotlib()
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
if args.doctests or not args.rst:
print("Running checks for %d modules:" % (len(modules),))
for module in modules:
if dots:
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others,
module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write('\n')
sys.stderr.flush()
if args.rst:
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
rst_path = os.path.relpath(os.path.join(base_dir, args.rst))
if os.path.exists(rst_path):
print('\nChecking files in %s:' % rst_path)
check_documentation(rst_path, results, args, dots)
else:
sys.stderr.write(f'\ninvalid --rst argument "{args.rst}"')
errormsgs.append('invalid directory argument to --rst')
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
for module, mod_results in results:
success = all(x[1] for x in mod_results)
if not success:
errormsgs.append(f'failed checking {module.__name__}')
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if len(errormsgs) == 0:
print("\nOK: all checks passed!")
sys.exit(0)
else:
print('\nERROR: ', '\n '.join(errormsgs))
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
elyase/masstable | masstable/masstable.py | 1 | 20624 | # -*- coding: utf-8 -*-
from __future__ import annotations
import pandas as pd
import os
import math
import functools
from functools import wraps
from typing import Callable, List, Tuple
package_dir, _ = os.path.split(__file__)
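# Simple memoization decorator: results are cached on the wrapped object,
# keyed by the stringified positional and keyword arguments. It is used below
# to cache the derived Table properties (binding_energy, s2n, ...).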
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
class Table:
def __init__(self, name: str = "", df: pd.DataFrame = None):
"Init from a Series/Dataframe (df) of a file (name)"
if df is not None: # init from dataframe
self.df = df
self.name = name
elif name in self._names: # init from name
self.name = name
self.df = self.from_name(name).df
# self.df.name = name
else:
print("Error: Invalid table name. Valid names are:")
print(" ".join(Table.names))
return None
_names = [
"AME2003",
"AME2003all",
"AME2012",
"AME2012all",
"AME1995",
"AME1995all",
"DUZU",
"FRDM95",
"KTUY05",
"ETFSI12",
"HFB14",
"HFB26",
"TCSM12",
"TCSM13",
"BR2013",
"MAJA88",
"GK88",
"WS32010",
"WS32011",
"SVM13",
]
@classmethod
def names(cls):
"""Return a list of the names of all supported mass models
Example:
>>> Table.names()
['AME2003', 'AME2003all', 'AME2012', 'AME2012all', 'AME1995',
'AME1995all', 'DUZU', 'FRDM95', 'KTUY05', 'ETFSI12', 'HFB14',
'HFB26', 'TCSM12', 'TCSM13', 'BR2013', 'MAJA88', 'GK88', 'WS32010', 'WS32011',
'SVM13']
"""
return cls._names
@classmethod
def from_name(cls, name: str):
"Imports a mass table from a file"
filename = os.path.join(package_dir, "data", name + ".txt")
return cls.from_file(filename, name)
@classmethod
def from_file(cls, filename: str, name: str = ""):
"Imports a mass table from a file"
df = pd.read_csv(filename, header=0, delim_whitespace=True, index_col=[0, 1])[
"M"
]
df.name = name
return cls(df=df, name=name)
@classmethod
def from_ZNM(cls, Z, N, M, name=""):
"""
Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64
"""
df = pd.DataFrame.from_dict({"Z": Z, "N": N, "M": M}).set_index(["Z", "N"])["M"]
df.name = name
return cls(df=df, name=name)
@classmethod
def from_array(cls, arr, name=""):
Z, N, M = arr.T
return cls.from_ZNM(Z, N, M, name)
def to_file(self, path: str):
"""Export the contents to a file as comma separated values.
Parameters:
path : File path where the data should be saved to
Example:
Export the last ten elements of AME2012 to a new file:
>>> Table('AME2012').tail(10).to_file('last_ten.txt')
"""
with open(path, "w") as f:
f.write("Z N M\n")
self.df.to_csv(path, sep="\t", mode="a")
@property
def Z(self):
"""
Return the proton number Z for all nuclei in the table as a numpy array.
"""
return self.df.index.get_level_values("Z").values
@property
def N(self):
"""
Return the neutron number N for all nuclei in the table as a numpy array.
"""
return self.df.index.get_level_values("N").values
@property
def A(self):
"""
Return the mass number A for all nuclei in the table as a numpy array.
"""
return self.Z + self.N
def __getitem__(self, index):
"""Access [] operator
Examples
--------
>>> Table('DUZU')[82, 126:127]
DUZU
Z N
82 126 -22.29
127 -17.87
>>> Table('AME2012all')[118, :]
AME2012all
Z N
118 173 198.93
174 199.27
175 201.43
"""
if isinstance(index, tuple) and len(index) == 2:
# can probably be simplified with pd.IndexSlice
if isinstance(index[0], int): # single N: "[82, :]"
startZ, stopZ = index[0], index[0]
if isinstance(index[1], int):
startN, stopN = index[1], index[1] # single N: "[:, 126]"
if isinstance(index[0], slice): # Z slice: "[:, 126]"
startZ, stopZ, stepZ = index[0].start, index[0].stop, index[0].step
if isinstance(index[1], slice): # N slice: "[:, 126]"
startN, stopN, stepN = index[1].start, index[1].stop, index[1].step
if not startZ:
startZ = self.Z.min() # might be optimized
if not stopZ:
stopZ = self.Z.max()
if not startN:
startN = self.N.min()
if not stopN:
stopN = self.N.max()
x = self.df.reset_index()
x = x.loc[
(x.Z >= startZ) & (x.Z <= stopZ) & (x.N >= startN) & (x.N <= stopN)
]
df = x.set_index(["Z", "N"]).sort_index(0)
return Table(df=df[df.columns[0]], name=self.name)
if isinstance(index, list):
return self.at(index)
if isinstance(index, Callable):
return self.select(index)
def __setitem__(self, key: int, value: int) -> None:
Z = key[0]
N = key[1]
self.df.loc[(Z, N)] = value
def __getattr__(self, attr):
# TODO: Pythonize
"Pass properties and method calls to the DataFrame object"
instance_method = getattr(self.df, attr)
if callable(instance_method):
def fn(*args, **kwargs):
result = instance_method(
*args, **kwargs
) # ()->call the instance method
if isinstance(result, (pd.DataFrame, pd.Series)):
try:
name = result.name
except AttributeError:
name = None
return Table(name=name, df=result) # wrap in Table class
return fn
else:
return instance_method
def __iter__(self):
for e in self.df.iteritems():
yield e
def __add__(self, other):
return Table(df=self.df + other.df, name="{}+{}".format(self.name, other.name))
def __sub__(self, other):
return Table(df=self.df - other.df, name="{}+{}".format(self.name, other.name))
def __div__(self, other):
return Table(df=self.df - other.df, name="{}+{}".format(self.name, other.name))
def align(self, *args, **kwargs):
result = self.df.align(*args, **kwargs)[0]
return Table(result.name, result)
def select(self, condition: Callable, name: str = "") -> Table:
"""
Selects nuclei according to a condition on Z,N or M
Parameters:
condition:
Can have one of the signatures f(M), f(Z,N) or f(Z, N, M)
must return a boolean value
name:
optional name for the resulting Table
Example:
Select all nuclei with A > 160:
>>> A_gt_160 = lambda Z,N: Z + N > 160
>>> Table('AME2003').select(A_gt_160)
"""
if condition.__code__.co_argcount == 1:
idx = [(Z, N) for (Z, N), M in self if condition(M)]
if condition.__code__.co_argcount == 2:
idx = [(Z, N) for (Z, N) in self.index if condition(Z, N)]
if condition.__code__.co_argcount == 3:
idx = [(Z, N) for (Z, N), M in self if condition(Z, N, M)]
index = pd.MultiIndex.from_tuples(idx, names=["Z", "N"])
return Table(df=self.df.loc[index], name=name)
def at(self, nuclei: List[Tuple[int, int]]) -> Table:
"""Return a selection of the Table at positions given by ``nuclei``
Parameters:
nuclei: list of tuples
                A list where each element is a tuple of the form (Z,N)
Example
-------
Return binding energies at magic nuclei:
>>> magic_nuclei = [(20,28), (50,50), (50,82), (82,126)]
>>> Table('AME2012').binding_energy.at(magic_nuclei)
Z N
20 28 416.014215
50 50 825.325172
82 1102.876416
82 126 1636.486450
"""
index = pd.MultiIndex.from_tuples(nuclei, names=["Z", "N"])
return Table(df=self.df.loc[index], name=self.name)
@classmethod
def empty(cls, name: str = "") -> Table:
return cls(df=pd.DataFrame(index=[], columns=[]), name=name)
def __len__(self):
"""Return the total number of nuclei
Example
-------
>>> len(Table('AME2012'))
2438
"""
return len(self.df)
@property
def count(self) -> int:
"""Return the total number of nuclei in the table
Example:
>>> Table('AME2012').count
2438
It is also possible to do:
>>> len(Table('AME2012'))
2438
"""
return len(self.df)
def intersection(self, table: Table) -> Table:
"""
Select nuclei which also belong to ``table``
Parameters:
table: a Table object
Example
-------
>>> Table('AME2003').intersection(Table('AME1995'))
"""
idx = self.df.index.intersection(table.df.index)
return Table(df=self.df[idx], name=self.name)
def not_in(self, table: Table) -> Table:
"""
Select nuclei not in table
Parameters:
table: Table
Table object from where nuclei should be removed
Example
-------
Find the new nuclei in AME2003 with Z,N >= 8:
>>> Table('AME2003').not_in(Table('AME1995'))[8:,8:].count
389
"""
idx = self.df.index.difference(table.df.index)
return Table(df=self.df[idx], name=self.name)
@property
@memoize
def odd_odd(self):
"""Selects odd-odd nuclei from the table:
>>> Table('FRDM95').odd_odd
Z N
9 9 1.21
11 0.10
13 3.08
15 9.32
...
"""
return self.select(lambda Z, N: (Z % 2) and (N % 2), name=self.name)
@property
@memoize
def odd_even(self) -> Table:
"""
Selects odd-even nuclei from the table
"""
return self.select(lambda Z, N: (Z % 2) and not (N % 2), name=self.name)
@property
@memoize
def even_odd(self):
"""
Selects even-odd nuclei from the table
"""
return self.select(lambda Z, N: not (Z % 2) and (N % 2), name=self.name)
@property
@memoize
def even_even(self):
"""
Selects even-even nuclei from the table
"""
return self.select(lambda Z, N: not (Z % 2) and not (N % 2), name=self.name)
def error(self, relative_to: str = "AME2003") -> Table:
"""
Calculate error difference
Parameters:
relative_to: a valid mass table name
Example
-------
>>> Table('DUZU').error(relative_to='AME2003').dropna()
Z N
8 8 0.667001
9 0.138813
10 -0.598478
11 -0.684870
12 -1.167462
"""
df = self.df - Table(relative_to).df
return Table(df=df)
def rmse(self, relative_to: str = "AME2003"):
"""Calculate root mean squared error
Parameters:
relative_to: a valid mass table name.
Example
>>> template = '{0:10}|{1:^6.2f}|{2:^6.2f}|{3:^6.2f}'
>>> print('Model ', 'AME95 ', 'AME03 ', 'AME12 ') # Table header
... for name in Table.names:
... print(template.format(name, Table(name).rmse(relative_to='AME1995'),
... Table(name).rmse(relative_to='AME2003'),
... Table(name).rmse(relative_to='AME2012')))
Model AME95 AME03 AME12
AME2003 | 0.13 | 0.00 | 0.13
AME2003all| 0.42 | 0.40 | 0.71
AME2012 | 0.16 | 0.13 | 0.00
AME2012all| 0.43 | 0.43 | 0.69
AME1995 | 0.00 | 0.13 | 0.16
AME1995all| 0.00 | 0.17 | 0.21
DUZU | 0.52 | 0.52 | 0.76
FRDM95 | 0.79 | 0.78 | 0.95
KTUY05 | 0.78 | 0.77 | 1.03
ETFSI12 | 0.84 | 0.84 | 1.04
HFB14 | 0.84 | 0.83 | 1.02
"""
error = self.error(relative_to=relative_to)
return math.sqrt((error.df ** 2).mean())
@property
@memoize
def binding_energy(self):
"""
Return binding energies instead of mass excesses
"""
        M_P = 938.2723  # MeV
        M_E = 0.5110  # MeV
        M_N = 939.5656  # MeV
        AMU = 931.494028  # MeV
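        # Binding energy from the tabulated mass excess: the atomic mass is
        # recovered as (mass excess + A*amu) and subtracted from the sum of
        # the constituent masses Z*(m_p + m_e) + N*m_n.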
df = self.Z * (M_P + M_E) + (self.A - self.Z) * M_N - (self.df + self.A * AMU)
return Table(df=df, name="BE" + "(" + self.name + ")")
@property
@memoize
def q_alpha(self):
"""Return Q_alpha"""
M_ALPHA = 2.4249156 # He4 mass excess in MeV
f = lambda parent, daugther: parent - daugther - M_ALPHA
return self.derived("Q_alpha", (-2, -2), f)
@property
@memoize
def q_beta(self):
"""Return Q_beta"""
f = lambda parent, daugther: parent - daugther
return self.derived("Q_beta", (1, -1), f)
@property
@memoize
def s2n(self):
"""Return 2 neutron separation energy"""
M_N = 8.0713171 # neutron mass excess in MeV
f = lambda parent, daugther: -parent + daugther + 2 * M_N
return self.derived("s2n", (0, -2), f)
@property
@memoize
def s1n(self) -> Table:
"""Return 1 neutron separation energy"""
M_N = 8.0713171 # neutron mass excess in MeV
f = lambda parent, daugther: -parent + daugther + M_N
return self.derived("s1n", (0, -1), f)
@property
@memoize
def s2p(self):
"""Return 2 proton separation energy"""
M_P = 7.28897050 # proton mass excess in MeV
f = lambda parent, daugther: -parent + daugther + 2 * M_P
return self.derived("s2p", (-2, 0), f)
@property
@memoize
def s1p(self):
"""Return 1 proton separation energy"""
M_P = 7.28897050 # proton mass excess in MeV
f = lambda parent, daugther: -parent + daugther + M_P
return self.derived("s1p", (-1, 0), f)
def derived(self, name: str, relative_coords: Tuple[int, int], formula: Callable):
"""Helper function for derived quantities"""
dZ, dN = relative_coords
daughter_idx = [(Z + dZ, N + dN) for Z, N in self.df.index]
idx = self.df.index.intersection(daughter_idx)
values = formula(self.df.values, self.df.reindex(daughter_idx).values)
return Table(
df=pd.Series(values, index=self.df.index, name=name + "(" + self.name + ")")
)
@property
@memoize
def ds2n(self):
"""Calculates the derivative of the neutron separation energies:
ds2n(Z,A) = s2n(Z,A) - s2n(Z,A+2)
"""
idx = [(x[0] + 0, x[1] + 2) for x in self.df.index]
values = self.s2n.values - self.s2n.loc[idx].values
return Table(
df=pd.Series(
values, index=self.df.index, name="ds2n" + "(" + self.name + ")"
)
)
@property
@memoize
def ds2p(self):
"""Calculates the derivative of the neutron separation energies:
ds2n(Z,A) = s2n(Z,A) - s2n(Z,A+2)
"""
idx = [(x[0] + 2, x[1]) for x in self.df.index]
values = self.s2p.values - self.s2p.loc[idx].values
return Table(
df=pd.Series(
values, index=self.df.index, name="ds2p" + "(" + self.name + ")"
)
)
def __repr__(self):
return self.df.__repr__()
def __str__(self):
return self.df.__str__()
def join(self, join="outer", *tables):
return Table(df=pd.concat([self.df] + [table.df for table in tables], axis=1))
def chart_plot(
self,
ax=None,
cmap: str = "RdBu",
xlabel: str = "N",
ylabel: str = "Z",
grid_on: bool = True,
colorbar: bool = True,
save_path: str = None,
):
"""Plot a nuclear chart with (N,Z) as axis and the values
of the Table as a color scale
Parameters:
ax: optional matplotlib axes
defaults to current axes
cmap: a matplotlib colormap
default: 'RdBu'
xlabel: string representing the label of the x axis
default: 'N'
ylabel: string, default: 'Z'
the label of the x axis
grid_on: (boolean), default: True,
whether to draw the axes grid or not
colorbar: boolean, default: True
whether to draw a colorbar or not
Returns:
ax: a matplotlib axes object
Example
-------
        Plot the theoretical deviation for Möller's model::
>>> Table('FRDM95').error().chart_plot()
"""
from scipy.interpolate import griddata
from numpy import linspace, meshgrid
import matplotlib.pyplot as plt
# extract the 1D arrays to be plotted
x = self.dropna().N
y = self.dropna().Z
z = self.dropna().values
# convert to matplotlibs grid format
xi = linspace(min(x), max(x), max(x) - min(x) + 1)
yi = linspace(min(y), max(y), max(y) - min(y) + 1)
X, Y = meshgrid(xi, yi)
Z = griddata((x, y), z, (X, Y), method="linear")
# create and customize plot
if ax is None:
ax = plt.gca()
chart = ax.pcolormesh(X, Y, Z, cmap=cmap, shading="auto")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(grid_on)
ax.set_aspect("equal")
if colorbar:
plt.colorbar(chart)
if save_path is not None:
fig = plt.gcf()
fig.savefig(save_path)
return ax
def chart_altair(
self,
title: str = "",
width: int = 600,
path: str = None,
scheme="viridis",
fmt=".2f",
overlay_text: bool = True,
legend_orientation="vertical",
):
import altair as alt
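        # Heatmap-style chart: one rect per nucleus, N on x, Z on y (descending),
        # coloured by the table values; the numeric value can optionally be
        # overlaid as text on each cell.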
data = self.df.dropna().reset_index()[["Z", "N"]]
data["color"] = self.df.dropna().values
base = alt.Chart(data).encode(
alt.X("N:O", scale=alt.Scale(paddingInner=0)),
alt.Y(
"Z:O",
scale=alt.Scale(paddingInner=0),
sort=alt.EncodingSortField("Z", order="descending"),
),
)
chart = base.mark_rect().encode(
color=alt.Color(
"color:Q",
scale=alt.Scale(scheme=scheme),
legend=alt.Legend(direction=legend_orientation),
title=title,
)
)
if overlay_text:
text = base.mark_text(baseline="middle").encode(
text=alt.Text("color:Q", format=fmt)
)
chart = chart + text
x_range = data["N"].max() - data["N"].min()
y_range = data["Z"].max() - data["Z"].min()
height = round(width * y_range / x_range)
chart = chart.properties(width=width, height=height)
if path is not None:
chart.save(path)
return chart
| mit |
Transkribus/TranskribusDU | TranskribusDU/contentProcessing/taggerIEmerge.py | 1 | 21191 | # -*- coding: utf-8 -*-
"""
taggerIEmerge.py
task: recognition of multi ouptuts classes
H. Déjean
copyright Naver labs Europe 2018
READ project
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import sys,os
from io import open
from optparse import OptionParser
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import confusion_matrix
os.environ['KERAS_BACKEND'] = 'tensorflow'
from keras.models import Sequential, load_model, Model
from keras.layers import Bidirectional, Dropout, Input
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Masking
from keras.regularizers import L1L2
import numpy as np
import pickle
import gzip
from contentProcessing.attentiondecoder import AttentionDecoder
class Transformer(BaseEstimator, TransformerMixin):
def __init__(self):
BaseEstimator.__init__(self)
TransformerMixin.__init__(self)
def fit(self, l, y=None):
return self
def transform(self, l):
assert False, "Specialize this method!"
class SparseToDense(Transformer):
def __init__(self):
Transformer.__init__(self)
def transform(self, o):
return o.toarray()
class NodeTransformerTextEnclosed(Transformer):
"""
    we will get a list of blocks and need to send back what a textual feature extractor (CountVectorizer) needs.
So we return a list of strings
"""
def transform(self, lw):
return map(lambda x: x, lw)
class DeepTagger():
usage = ""
version = "v.01"
description = "description: keras/bilstm ner"
def __init__(self):
self.dirName = None
self.sModelName = None
self.sAux = "aux.pkl"
self.lnbClasses = None
self.max_sentence_len = 0
self.max_features = 100
self.maxngram = 3
self.nbEpochs = 10
self.batch_size = 50
self.hiddenSize= 32
self.bGridSearch = False
self.bTraining_multitype,self.bTraining, self.bTesting, self.bPredict = False,False,False, False
self.lTrain = []
self.lTest = []
self.lPredict= []
self.bAttentionLayer= False
self.bMultiType = False
# mapping vector
self.ltag_vector=[]
def setParams(self,dParams):
"""
"""
if dParams.dirname:
self.dirName = dParams.dirname
if dParams.name:
self.sModelName = dParams.name
if dParams.batchSize:
self.batch_size = dParams.batchSize
if dParams.nbEpochs:
self.nbEpochs = dParams.nbEpochs
if dParams.hidden:
self.hiddenSize = dParams.hidden
if dParams.nbfeatures:
self.max_features = dParams.nbfeatures
if dParams.ngram:
self.maxngram = dParams.ngram
self.bMultiType = dParams.multitype
if dParams.training:
self.lTrain = dParams.training
self.bTraining=True
if dParams.testing:
self.lTest = dParams.testing
self.bTesting=True
if dParams.predict:
self._sent =dParams.predict #.decode('latin-1')
self.bPredict=True
if dParams.attention:
self.bAttentionLayer=True
def initTransformeur(self):
# lowercase = False ?? True by default
self.cv= CountVectorizer( max_features = self.max_features
, analyzer = 'char' ,ngram_range = (1,self.maxngram)
, dtype=np.float64)
self.node_transformer = FeatureUnion([
("ngrams", Pipeline([
('selector', NodeTransformerTextEnclosed()),
('cv', self.cv),
('todense', SparseToDense())
])
)
])
def load_data_Multitype(self,lFName):
"""
load data as training data (x,y)
X Y1 Y2 Y3
Sa B_ABPGermanDateGenerator S_weekDayDateGenerator NoneM
"""
self.lnbClasses = []
self.lClasses = []
nbc = 3
for i in range(0,nbc):
self.lnbClasses.append(0)
self.lClasses.append([])
self.ltag_vector.append({})
lTmp=[]
for fname in lFName:
f=open(fname,encoding='utf-8')
x=[]
iseq = 0
for l in f:
iseq += 1
l = l.strip()
if l[:2] == '# ':continue # comments
if l =='EOS':
lTmp.append(x)
self.max_sentence_len = max(self.max_sentence_len,len(x))
x=[]
iseq = 0
else:
try:
la=l.split('\t')
b1=la[-1]
b2=la[-2]
b3=la[-3]
except ValueError:
#print 'cannot find value and label in: %s'%(l)
continue
assert len(la) != 0
if b1 not in self.lClasses[0]:
self.lClasses[0].append(b1)
if b2 not in self.lClasses[1]:
self.lClasses[1].append(b2)
if b3 not in self.lClasses[2]:
self.lClasses[2].append(b3)
x.append((la[0],(b1,b2,b3)))
if x != []:
lTmp.append(x)
f.close()
for i in [0,1,2]:
self.lnbClasses[i] = len(self.lClasses[i]) + 1
for tag_class_id,b in enumerate(self.lClasses[i]):
one_hot_vec = np.zeros(self.lnbClasses[i], dtype=np.int32)
one_hot_vec[tag_class_id] = 1
self.ltag_vector[i][b] = tuple(one_hot_vec)
self.ltag_vector[i][tuple(one_hot_vec)] = b
# Add nil class
if 'NIL' not in self.ltag_vector[i]:
self.lClasses[i].append('NIL')
one_hot_vec = np.zeros(self.lnbClasses[i], dtype=np.int32)
one_hot_vec[self.lnbClasses[i]-1] = 1
self.ltag_vector[i]['NIL'] = tuple(one_hot_vec)
self.ltag_vector[i][tuple(one_hot_vec)] = 'NIL'
# more than 1 sequence
assert len(lTmp) > 1
# shuffle(lTmp)
lX = []
lY = []
for sample in lTmp:
lX.append(list(map(lambda xy:xy[0],sample)))
lY.append(list(map(lambda xy:xy[1],sample)))
del lTmp
return lX,lY
# def load_data_for_testing_Multitype(self,lFName):
# """
# load data as training data (x,y)
# nbClasses must be known!
# loadModel first!
# """
#
# lTmp=[]
# for fname in lFName:
# f=open(fname,encoding='utf-8')
# x=[]
# for l in f:
# l = l.strip()
# if l[:2] == '# ':continue # comments
# if l =='EOS':
# if x!=[]:
# lTmp.append(x)
# x=[]
# else:
# try:
# la=l.split('\t')
# b1=la[-1].split('_')[0]
# b2=la[-1].split('_')[1]
# except ValueError:
# print('ml:cannot find value and label in: %s'%(l))
# sys.exit()
# assert len(la) != 0
# x.append((la[0],(b1,b2)))
#
# if x != []:
# lTmp.append(x)
# f.close()
#
# lX = []
# lY = []
# for sample in lTmp:
# lX.append(list(map(lambda xy:xy[0],sample)))
# lY.append(list(map(lambda xy:xy[1],sample)))
#
# del lTmp
#
# return lX,lY
def storeModel(self,model, aux):
"""
store model and auxillary data (transformer)
"""
model.save('%s/%s.hd5'%(self.dirName,self.sModelName))
print('model dumped in %s/%s.hd5' % (self.dirName,self.sModelName))
#max_features,max_sentence_len, self.nbClasses,self.tag_vector , node_transformer
pickle.dump((self.bMultiType,self.maxngram,self.max_features,self.max_sentence_len,self.lnbClasses,self.ltag_vector,self.node_transformer),gzip.open('%s/%s.%s'%(self.dirName,self.sModelName,self.sAux),'wb'))
print('aux data dumped in %s/%s.%s' % (self.dirName,self.sModelName,self.sAux))
def loadModels(self):
"""
load models and aux data
"""
if self.bAttentionLayer:
self.model = load_model(os.path.join(self.dirName,self.sModelName+'.hd5'),custom_objects={"AttentionDecoder": AttentionDecoder})
else:
self.model = load_model(os.path.join(self.dirName,self.sModelName+'.hd5'))
print('model loaded: %s/%s.hd5' % (self.dirName,self.sModelName))
try:
self.bMultiType,self.maxngram,self.max_features,self.max_sentence_len, self.lnbClasses,self.ltag_vector , self.node_transformer = pickle.load(gzip.open('%s/%s.%s'%(self.dirName,self.sModelName,self.sAux),'r'))
except:
self.maxngram,self.max_features,self.max_sentence_len, self.lnbClasses,self.ltag_vector , self.node_transformer = pickle.load(gzip.open('%s/%s.%s'%(self.dirName,self.sModelName,self.sAux),'r'))
self.bMultiType = False
print('aux data loaded: %s/%s.%s' % (self.dirName,self.sModelName,self.sAux))
print("ngram: %s\tmaxfea=%s\tpadding=%s\tnbclasses=%s" % (self.maxngram,self.max_features,self.max_sentence_len, self.lnbClasses))
print("multitype model:%s"%(self.bMultiType))
def training_multitype(self,traindata):
"""
training
"""
train_X,_ = traindata #self.load_data(self.lTrain)
self.initTransformeur()
fX= [item for sublist in train_X for item in sublist ]
self.node_transformer.fit(fX)
#
lX,(lY,lY2,lY3) = self.prepareTensor_multitype(traindata)
# print (lX.shape)
# print (lY.shape)
# print (lY2.shape)
inputs = Input(shape=(self.max_sentence_len, self.max_features))
x = Masking(mask_value=0)(inputs)
x = Bidirectional(LSTM(self.hiddenSize,return_sequences = True, dropout=0.5), merge_mode='concat')(x)
# x = TimeDistributed(Dense(self.lnbClasses[0], activation='softmax'))(x)
out1 = TimeDistributed(Dense(self.lnbClasses[0], activation='softmax'),name='M')(x)
out2 = TimeDistributed(Dense(self.lnbClasses[1], activation='softmax'),name='spec')(x)
out3 = TimeDistributed(Dense(self.lnbClasses[2], activation='softmax'),name='gen')(x)
model = Model(input = inputs,output = [out1,out2,out3])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',metrics=['categorical_accuracy'] )
print (model.summary())
_ = model.fit(lX, [lY,lY2,lY3], epochs = self.nbEpochs,batch_size = self.batch_size, verbose = 1,validation_split = 0.1, shuffle=True)
del lX,lY,lY2,lY3
auxdata = self.max_features,self.max_sentence_len,self.lnbClasses,self.ltag_vector,self.node_transformer
return model, auxdata
def prepareTensor_multitype(self,annotated):
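        # Vectorize each token with the char n-gram transformer and pad every
        # sequence to max_sentence_len with zero feature vectors and 'NIL'
        # one-hot labels for the three output tag sets.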
lx,ly = annotated
lX = list()
lY1 = list()
lY2 = list()
lY3 = list()
# = np.array()
for x,y in zip(lx,ly):
words = self.node_transformer.transform(x)
wordsvec = []
elem_tags1 = []
elem_tags2 = []
elem_tags3 = []
for ix,ss in enumerate(words):
wordsvec.append(ss)
elem_tags1.append(list(self.ltag_vector[0][y[ix][0]]))
elem_tags2.append(list(self.ltag_vector[1][y[ix][1]]))
elem_tags3.append(list(self.ltag_vector[2][y[ix][2]]))
nil_X = np.zeros(self.max_features)
nil_Y1 = np.array(self.ltag_vector[0]['NIL'])
nil_Y2 = np.array(self.ltag_vector[1]['NIL'])
nil_Y3 = np.array(self.ltag_vector[2]['NIL'])
pad_length = self.max_sentence_len - len(wordsvec)
lX.append( wordsvec +((pad_length)*[nil_X]) )
lY1.append( elem_tags1 + ((pad_length)*[nil_Y1]) )
lY2.append( elem_tags2 + ((pad_length)*[nil_Y2]) )
lY3.append( elem_tags3 + ((pad_length)*[nil_Y3]) )
del lx
del ly
lX=np.array(lX)
lY1=np.array(lY1)
lY2=np.array(lY2)
lY3=np.array(lY3)
return lX,(lY1,lY2,lY3)
def testModel_Multitype(self,testdata):
"""
test model
"""
lX,(lY,lY2,lY3) = self.prepareTensor_multitype(testdata)
scores = self.model.evaluate(lX,[lY,lY2,lY3],verbose=True)
#print(list(zip(self.model.metrics_names,scores)))
test_x, _ = testdata
y_pred1,y_pred2, y_pred3 = self.model.predict(lX)
for i,_ in enumerate(lX):
for iy,pred_seq in enumerate([y_pred1[i],y_pred2[i],y_pred3[i]]):
pred_tags = []
for class_prs in pred_seq:
class_vec = np.zeros(self.lnbClasses[iy], dtype=np.int32)
class_vec[ np.argmax(class_prs) ] = 1
# print class_prs[class_prs >0.1]
if tuple(class_vec.tolist()) in self.ltag_vector[iy]:
# print(self.tag_vector[tuple(class_vec.tolist())],class_prs[np.argmax(class_prs)])
pred_tags.append((self.tag_vector[tuple(class_vec.tolist())],class_prs[np.argmax(class_prs)]))
print(test_x[i],pred_tags[:len(test_x[i])])
def prepareOutput_multitype(self,lToken,lLTags):
"""
format final output with MultiType
"""
lRes= []
for itok,seq in enumerate(lToken):
print (seq,lLTags[0][itok],lLTags[1][itok],lLTags[2][itok])
tag1,tag2,tag3 = lLTags[0][itok],lLTags[1][itok],lLTags[2][itok]
lRes.append([((itok,itok),seq,tag1,tag2,tag3)])
# lRes.append((toffset,tok,label,list(lScore)))
return lRes
def predict_multiptype(self,lsent):
"""
predict over a set of sentences (unicode)
"""
lRes= []
for mysent in lsent :
if len(mysent.split())> self.max_sentence_len:
print ('max sent length: %s'%self.max_sentence_len)
continue
# allwords= self.node_transformer.transform(mysent.split())
allwords= self.node_transformer.transform(mysent.split())
# print mysent.split()
# n=len(mysent.split())
wordsvec = []
for w in allwords:
wordsvec.append(w)
lX = list()
nil_X = np.zeros(self.max_features)
pad_length = self.max_sentence_len - len(wordsvec)
lX.append( wordsvec +((pad_length)*[nil_X]) )
lX=np.array(lX)
# print(pad_length*[nil_X] + wordsvec, self.max_sentence_len)
# assert pad_length*[nil_X] + wordsvec >= self.max_sentence_len
y_pred1,y_pred2,y_pred3 = self.model.predict(lX)
for i,_ in enumerate(lX):
# pred_seq = y_pred[i]
l_multi_type_results = []
for iy,pred_seq in enumerate([y_pred1[i],y_pred2[i],y_pred3[i]]):
pred_tags = []
pad_length = self.max_sentence_len - len(allwords)
for class_prs in pred_seq:
class_vec = np.zeros(self.lnbClasses[iy], dtype=np.int32)
class_vec[ np.argmax(class_prs) ] = 1
if tuple(class_vec.tolist()) in self.ltag_vector[iy]:
# print (iy,tuple(class_vec.tolist()),self.ltag_vector[iy][tuple(class_vec.tolist())],class_prs[np.argmax(class_prs)])
pred_tags.append((self.ltag_vector[iy][tuple(class_vec.tolist())],class_prs[np.argmax(class_prs)]))
l_multi_type_results.append(pred_tags[:len(allwords)])
# print(mysent,l_multi_type_results)
lRes.append(self.prepareOutput_multitype(mysent.split(),l_multi_type_results))
return lRes
def run(self):
"""
"""
if self.bGridSearch:
pass
# self.gridSearch()
if self.bMultiType and self.bTraining:
lX, lY = self.load_data_Multitype(self.lTrain)
model, other = self.training_multitype((lX,lY))
# store
self.storeModel(model,other)
del lX, lY
del self.node_transformer
del model
if self.bTraining and not self.bMultiType:
lX, lY = self.load_data(self.lTrain)
model, other = self.training((lX,lY))
# store
self.storeModel(model,other)
del lX, lY
del self.node_transformer
del model
if self.bTesting:
self.loadModels()
if self.bMultiType:
lX,lY = self.load_data_for_testing_Multitype(self.lTest)
res = self.testModel_Multitype((lX,lY))
else:
lX,lY = self.load_data_for_testing(self.lTest)
res = self.testModel((lX,lY))
if self.bPredict:
# which input format: [unicode]
self.loadModels()
lsent = [self._sent]
print (lsent)
if self.bMultiType:
lres = self.predict_multiptype(lsent)
else:
lres = self.predict(lsent)
for r in lres:
print (r)
if __name__ == '__main__':
cmp = DeepTagger()
cmp.parser = OptionParser(usage="", version="0.1")
cmp.parser.description = "BiLSTM approach for NER"
cmp.parser.add_option("--name", dest="name", action="store", type="string", help="model name")
cmp.parser.add_option("--dir", dest="dirname", action="store", type="string", help="directory to store model")
cmp.parser.add_option("--training", dest="training", action="append", type="string", help="training data")
cmp.parser.add_option("--ml", dest="multitype", action="store_true",default=False, help="multi type version")
cmp.parser.add_option("--hidden", dest="hidden", action="store", type="int", help="hidden layer dimension")
cmp.parser.add_option("--batch", dest="batchSize", action="store", type="int", help="batch size")
cmp.parser.add_option("--epochs", dest="nbEpochs", action="store", type="int", default=2,help="nb epochs for training")
cmp.parser.add_option("--ngram", dest="ngram", action="store", type="int", default=2,help="ngram size")
cmp.parser.add_option("--nbfeatures", dest="nbfeatures", action="store", type="int",default=128, help="nb features")
cmp.parser.add_option("--testing", dest="testing", action="append", type="string", help="test data")
cmp.parser.add_option("--run", dest="predict", action="store", type="string", help="string to be categorized")
cmp.parser.add_option("--att", dest="attention", action="store_true", default=False, help="add attention layer")
(options, args) = cmp.parser.parse_args()
#Now we are back to the normal programmatic mode, we set the component parameters
cmp.setParams(options)
#This component is quite special since it does not take one XML as input but rather a series of files.
#doc = cmp.loadDom()
doc = cmp.run()
| bsd-3-clause |
akokai/commongroups | commongroups/tests/test_commongroups.py | 1 | 5483 | # -*- coding: utf-8 -*-
"""
Test suite for commongroups program architecture.
For tests of database query logic, stay tuned...
Depends:
Home environment is configured; structure-searchable database exists and
PostgreSQL is running; Google Sheets access is configured.
Side-effects:
Creates directories and log files.
"""
# pylint: disable=invalid-name,missing-docstring
from itertools import islice
import json
import os
from os.path import exists, join as pjoin
from pkg_resources import resource_filename, resource_string
from pandas import DataFrame
import pytest
from sqlalchemy.engine import Engine
from sqlalchemy.sql import Select
from commongroups.cmgroup import CMGroup
from commongroups.env import CommonEnv
from commongroups.errors import MissingParamError, NoCredentialsError
from commongroups.hypertext import directory
from commongroups.googlesheet import SheetManager
from commongroups.ops import (batch_process,
cmgs_from_file,
cmgs_from_googlesheet,
collect_to_json)
from commongroups.query import QueryMethod, get_query_results
PARAMS_JSON = resource_filename(__name__, 'params.json')
LOCAL_PARAMS = json.loads(resource_string(__name__, 'params.json').decode())
PAR_FAIL_QM = {'cmg_id': 'x666666', 'name': 'Incomplete parameters'}
TEST_LIMIT = 5
# Instantiate a few objects to run multiple tests on:
env = CommonEnv('test', google_worksheet='test')
env.connect_database()
blank_env = CommonEnv(env_path=env.results_path)
# Define a few generic helper functions for the tests.
def check_params(params):
"""
Verify that group parameters read from file or Google Sheet are OK.
    The argument should be a params data structure for creating a single group,
    not a list of parameters for many groups.
"""
assert isinstance(params, dict)
assert 'params' in params.keys()
assert 'info' in params.keys()
assert 'notes' in params['info'].keys()
def check_cmg(cmg):
assert isinstance(cmg, CMGroup)
assert isinstance(cmg.cmg_id, str)
assert isinstance(cmg.params, dict)
assert isinstance(cmg.info, dict)
assert 'notes' in cmg.info
cmg.add_info({'Added info': 'Success!'})
assert 'Added info' in cmg.info
# Tests:
def test_env_config():
assert env.config['google_worksheet'] == 'test'
assert len(blank_env.config) == 0
with pytest.raises(MissingParamError):
blank_env.connect_database()
with pytest.raises(MissingParamError):
gen = cmgs_from_googlesheet(blank_env)
def test_env_db():
env.connect_database()
assert isinstance(env.database, Engine)
def test_cmgs_IO():
for params in LOCAL_PARAMS:
check_params(params)
cmg_gen_json = cmgs_from_file(env, PARAMS_JSON)
cmgs = list(islice(cmg_gen_json, None))
assert len(cmgs) > 2
for cmg in cmgs:
check_cmg(cmg)
coll_json_path = pjoin(env.results_path, 'cmgroups.json')
collect_to_json(cmgs, env)
assert exists(coll_json_path)
html_dir_path = pjoin(env.results_path, 'html', 'index.html')
directory(cmgs, env)
assert exists(html_dir_path)
def test_googlesheet():
with pytest.raises(NoCredentialsError):
gsm = SheetManager('Untitled', 'Sheet 1', 'KEYFILE.DNE')
sheet = SheetManager(env.config['google_sheet_title'],
env.config['google_worksheet'],
env.config['google_key_file'])
google_params = list(islice(sheet.get_params(), None))
for params in google_params:
check_params(params)
cmg_gen = cmgs_from_googlesheet(env)
cmgs = list(islice(cmg_gen, None))
assert len(cmgs) > 2
for cmg in cmgs:
check_cmg(cmg)
path = pjoin(env.results_path, 'google_params.json')
sheet.params_to_json(path)
assert exists(path)
def test_querymethod():
for params in [PAR_FAIL_QM, ]:
with pytest.raises(MissingParamError):
bad_qmd = QueryMethod(params)
qmd = QueryMethod(LOCAL_PARAMS[0]['params'])
assert isinstance(qmd.get_literal(), str)
assert isinstance(qmd.expression, Select)
qmd.expression = qmd.expression.limit(TEST_LIMIT)
res = get_query_results(qmd.expression, env.database)
assert isinstance(res, DataFrame)
assert len(res) == TEST_LIMIT
def test_cmg_process():
cmg = CMGroup(env, LOCAL_PARAMS[0]['params'], LOCAL_PARAMS[0]['info'])
cmg.create_query()
assert isinstance(cmg.query, QueryMethod)
assert isinstance(cmg.query.get_literal(), str)
assert isinstance(cmg.query.expression, Select)
# Use the process() method
cmg.process(env.database)
assert isinstance(cmg.compounds, DataFrame)
assert isinstance(cmg.info, dict)
assert 'about' in cmg.info
assert isinstance(cmg.info['count'], int)
assert cmg.info['sql'] == cmg.query.get_literal()
cmg.to_json()
cmg.to_html(formats=['json'])
def test_batch_process():
cmg_gen = cmgs_from_file(env, PARAMS_JSON)
cmgs_done = batch_process(cmg_gen, env)
for cmg in cmgs_done:
check_cmg(cmg)
assert exists(
pjoin(cmg.results_path, '{}.json'.format(cmg.cmg_id))
)
assert exists(
pjoin(cmg.results_path, '{}.xlsx'.format(cmg.cmg_id))
)
assert exists(
pjoin(cmg.results_path, 'html', '{}.html'.format(cmg.cmg_id))
)
assert exists(pjoin(env.results_path, 'html', 'index.html'))
| mit |
HBNLdev/DataStore | db/knowledge/questionnaires.py | 1 | 12097 | ''' knowledge about questionnaire data and attributes of their CSV files
    as they are found on the zork website. Change over time as needed. '''
import os
from db.utils import files as fU
# defining maximum followups
most_recent_distribution = 103
max_fups = 7
# definitions of zork URLs
base_url = 'https://zork5.wustl.edu/coganew/data/available_data'
# updated often!
core_url = '/pheno_all/core_pheno_20190710.zip'
allrels_file = 'allrelsweb201906.sas7bdat.csv'
# updated semi-often
cal_url = '/Phase_IV/CalFoms01.26.2017_sas.zip'
# not updated in a while
ach_url = '/Phase_IV/Achenbach%20January%202016%20Distribution.zip'
fam_url = '/family_data/allfam_sas_3-20-12.zip'
vcu_url = '/vcu_ext_pheno/vcu_ext_all_121112_sas.zip'
# definitions of locations in filesystem
harmonization_path = '/processed_data/zork/harmonization/harmonization-combined-format.csv'
zork_p123_path = '/processed_data/zork/zork-phase123/'
p123_master_path = zork_p123_path + 'subject/master/master.sas7bdat.csv'
zork_p4_path = '/processed_data/zork/zork-phase4-' + str(most_recent_distribution) + '/'
p4_master_path = fU.match_files(os.path.join(zork_p4_path,'subject/master'),
beginning='master4',end='sas7bdat.csv')[0]
# can stay at 69 if they haven't changed
internalizing_dir = '/processed_data/zork/zork-phase4-69/subject/internalizing/'
internalizing_file = 'INT_Scale_All-Total-Scores_n11281.csv'
externalizing_dir = '/processed_data/zork/zork-phase4-94/subject/vcuext/'
externalizing_file = 'vcu_ext_all_121112.sas7bdat.csv'
fham_file = 'bigfham4.sas7bdat.csv'
# master file columns
p1_cols = ['SSAGA_DT', 'CSAGA_DT', 'CSAGC_DT', 'FHAM_DT', 'TPQ_DT',
'ZUCK_DT', 'ERP_DT', ]
p2_cols = ['SAGA2_DT', 'CSGA2_DT', 'CSGC2_DT', 'ERP2_DT', 'FHAM2_DT',
'AEQ_DT', 'AEQA_DT', 'QSCL_DT', 'DAILY_DT', 'NEO_DT',
'SRE_DT', 'SSSC_DT']
p3_cols = ['SAGA3_DT', 'CSGA3_DT', 'CSGC3_DT', 'ERP3_DT', 'FHAM3_DT',
'AEQ3_DT', 'AEQA3_DT', 'QSCL3_DT', 'DLY3_DT', 'NEO3_DT',
'SRE3_DT', 'SSSC3_DT']
p4_col_prefixes = ['aeqg', 'aeqy', 'crv', 'cssaga', 'dp', 'hass',
'neo', 'ssaga', 'sssc', 'ssv']
# note we defaultly use this dateformat because pandas sniffs to this format
def_info = {'date_lbl': ['ADM_Y', 'ADM_M', 'ADM_D'],
'na_val': '',
'dateform': '%Y-%m-%d',
'file_ext': '.sas7bdat.csv',
# 'file_ext': '.sas7bdat',
'max_fups': max_fups,
'id_lbl': 'ind_id',
'capitalize': False,
}
# still missing: cal
# for non-ssaga questionnaires, if there are multiple file_pfixes,
# the files are assumed to be basically non-overlapping in terms of individuals
# (one for adults, and one for adolescents)
# daily, dependence, and sensation should be capitalized on import for phase4
map_ph4 = {
'achenbach': {'file_pfixes': ['asr4', 'ysr4'],
'zip_name': 'Achenbach',
'date_lbl': 'datefilled',
'drop_keys': ['af_', 'bp_'],
'zork_url': ach_url},
'aeq': {'file_pfixes': ['aeqascore4', 'aeqscore4'],
'zip_name': 'aeq4',
'zork_url': '/Phase_IV/aeq4.zip'},
'bis': {'file_pfixes': ['bis_a_score4', 'bis_score4'],
'zip_name': 'biq4',
'zork_url': '/Phase_IV/biq4.zip'},
'cal': {'file_pfixes': ['scored'],
'zip_name': 'CAL',
'zork_url': cal_url},
'craving': {'file_pfixes': ['crv4'],
'zip_name': 'crv',
'zork_url': '/Phase_IV/crv4.zip'},
'daily': {'file_pfixes': ['daily4'],
'zip_name': 'daily',
'zork_url': '/Phase_IV/daily4.zip',
'capitalize': True},
'dependence': {'file_pfixes': ['dpndnce4'],
'zip_name': 'dpndnce',
'zork_url': '/Phase_IV/dpndnce4.zip',
'capitalize': True},
'neo': {'file_pfixes': ['neo4'],
'zip_name': 'neo',
'zork_url': '/Phase_IV/neo4.zip'},
'sensation': {'file_pfixes': ['ssvscore4'],
'zip_name': 'ssv',
'zork_url': '/Phase_IV/ssvscore4.zip',
'capitalize': True},
'sre': {'file_pfixes': ['sre_score4'],
'zip_name': 'sre4',
'zork_url': '/Phase_IV/sre4.zip'},
}
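# Illustrative sketch (not part of the original module): a consumer would
# typically overlay a questionnaire-specific entry on the shared defaults in
# def_info, letting the specific entry win, e.g.:
#   ach_info = {**def_info, **map_ph4['achenbach']}
#   # ach_info['date_lbl'] -> 'datefilled'; ach_info['id_lbl'] -> 'ind_id' (default)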
# for ssaga questionnaires, the multiple file_pfixes are perfectly overlapping,
# so we end up joining them
# capitalize all DX on import
map_ph4_ssaga = {
'cssaga': {'file_pfixes': ['cssaga4', 'dx_cssaga4'],
'date_lbl': 'IntvDate',
'id_lbl': 'IND_ID',
'zip_name': 'cssaga_dx',
'zork_url': '/Phase_IV/cssaga_dx.zip'},
'pssaga': {'file_pfixes': ['pssaga4', 'dx_pssaga4'],
'date_lbl': 'IntvDate',
'id_lbl': 'ind_id',
'zip_name': 'cssagap_dx',
'zork_url': '/Phase_IV/cssagap_dx.zip'},
'ssaga': {'file_pfixes': ['ssaga4', 'dx_ssaga4'],
'date_lbl': 'IntvDate',
'id_lbl': 'IND_ID',
'zip_name': 'ssaga_dx',
'zork_url': '/Phase_IV/ssaga_dx.zip'}
}
# for subject-specific info, used by quest_retrieval.py
map_subject = {'core': {'file_pfixes': 'core',
'zip_name': 'core',
'zork_url': core_url},
'fams': {'file_pfixes': 'allfamilies',
'zip_name': 'allfam',
'zork_url': fam_url},
'fham': {'file_pfixes': 'bigfham4',
'zip_name': 'bigfham4',
'zork_url': '/Phase_IV/bigfham4.zip'},
'rels': {'file_pfixes': 'all_rels',
'zip_name': 'allrels',
'zork_url': '/family_data/allrels_sas.zip'},
'vcuext': {'file_pfixes': ['vcu'],
'zip_name': 'vcu',
'zork_url': vcu_url},
'master': {'file_pfixes': 'master4',
'zip_name': 'master4',
'zork_url': '/Phase_IV/master4_sas.zip'}
}
# note these have variegated date labels!
# for aeq, the score is not available for phases <4
# for sensation, the score is not available for phase 2
map_ph123 = {'aeq': {'file_pfixes': ['aeq', 'aeqa', 'aeq3', 'aeqa3'],
'followup': {'aeq': 'p2', 'aeq3': 'p3', 'aeqa': 'p2', 'aeqa3': 'p3'},
'date_lbl': {'aeq': 'AEQ_DT', 'aeqa': 'AEQA_DT', 'aeq3': 'AEQ3_DT', 'aeqa3': 'AEQA3_DT'},
'id_lbl': 'IND_ID'},
'craving': {'file_pfixes': ['craving', 'craving3'],
'followup': {'craving': 'p2', 'craving3': 'p3', },
'date_lbl': {'craving': 'QSCL_DT', 'craving3': 'QSCL3_DT'},
'id_lbl': 'IND_ID'},
'daily': {'file_pfixes': ['daily', 'daily3'],
'followup': {'daily': 'p2', 'daily3': 'p3', },
'date_lbl': {'daily': 'DAILY_DT', 'daily3': 'DLY3_DT'},
'id_lbl': 'IND_ID'},
'dependence': {'file_pfixes': ['dpndnce', 'dpndnce3'],
'followup': {'dpndnce': 'p2', 'dpndnce3': 'p3', },
'date_lbl': {'dpndnce': 'QSCL_DT', 'dpndnce3': 'QSCL3_DT'},
'id_lbl': 'IND_ID'},
'neo': {'file_pfixes': ['neo', 'neo3'],
'followup': {'neo': 'p2', 'neo3': 'p3', },
'date_lbl': {'neo': 'NEO_DT', 'neo3': 'NEO3_DT'},
'id_lbl': 'IND_ID'},
'sensation': {'file_pfixes': ['sssc', 'ssvscore', 'sssc3'],
'followup': {'sssc': 'p2', 'sssc3': 'p3', 'ssvscore': 'p3'},
'date_lbl': {'sssc': 'SSSC_DT', 'sssc3': 'SSSC3_DT', 'ssvscore': 'ZUCK_DT'},
'id_lbl': 'IND_ID'},
'sre': {'file_pfixes': ['sre', 'sre3'],
'followup': {'sre': 'p2', 'sre3': 'p3', },
'date_lbl': {'sre': 'SRE_DT', 'sre3': 'SRE3_DT'},
'id_lbl': 'IND_ID'},
}
map_ph123_ssaga = {'cssaga': {'file_pfixes': ['cssaga', 'csaga2', 'csaga3', 'dx_csaga', 'dx_csag2', 'dx_csag3'],
'followup': {'cssaga': 'p1', 'csaga2': 'p2', 'csaga3': 'p3',
'dx_csaga': 'p1', 'dx_csag2': 'p2', 'dx_csag3': 'p3'},
'date_lbl': {'cssaga': 'CSAGA_COMB_DT', 'csaga2': 'CSAG2_DT', 'csaga3': 'CSAG2_DT',
'dx_csaga': None, 'dx_csag2': None, 'dx_csag3': None},
'joindate_from': {'dx_csaga': 'cssaga', 'dx_csag2': 'csaga2', 'dx_csag3': 'csaga3'},
'id_lbl': 'IND_ID',
'dateform': '%m/%d/%Y', },
'pssaga': {'file_pfixes': ['pssaga', 'psaga2', 'psaga3', 'dx_psaga', 'dx_psag2', 'dx_psag3'],
'followup': {'pssaga': 'p1', 'psaga2': 'p2', 'psaga3': 'p3',
'dx_psaga': 'p1', 'dx_psag2': 'p2', 'dx_psag3': 'p3'},
'date_lbl': {'pssaga': 'CSAGP_DT', 'psaga2': 'CSGP2_DT', 'psaga3': 'CSGP2_DT',
'dx_psaga': None, 'dx_psag2': None, 'dx_psag3': None},
'joindate_from': {'dx_psaga': 'pssaga', 'dx_psag2': 'psaga2', 'dx_psag3': 'psaga3'},
'id_lbl': 'IND_ID',
'dateform': '%m/%d/%Y', },
'ssaga': {'file_pfixes': ['ssaga', 'ssaga2', 'ssaga3', 'dx_ssaga', 'dx_saga2rv', 'dx_saga3rv'],
'followup': {'ssaga': 'p1', 'ssaga2': 'p2', 'ssaga3': 'p3',
'dx_ssaga': 'p1', 'dx_saga2rv': 'p2', 'dx_saga3rv': 'p3'},
'date_lbl': {'ssaga': None, 'ssaga2': None, 'ssaga3': None,
'dx_ssaga': None, 'dx_saga2rv': None, 'dx_saga3rv': None},
'joindate_lbl': {'ssaga': 'SSAGA_DT', 'ssaga2': 'SAGA2_DT', 'ssaga3': 'SAGA3_DT',
'dx_ssaga': 'SSAGA_DT', 'dx_saga2rv': 'SAGA2_DT',
'dx_saga3rv': 'SAGA3_DT'},
'joindate_from': {'ssaga': None, 'ssaga2': None, 'ssaga3': None,
'dx_ssaga': None, 'dx_saga2rv': None, 'dx_saga3rv': None},
'id_lbl': 'IND_ID',
'dateform': '%m/%d/%Y', }
}
map_ph123_ssaga['ssaga']['joindate_from'] = {k: p123_master_path for k in map_ph123_ssaga['ssaga']['date_lbl'].keys()}
HEvars_interest = ['HE1', 'HE14_1', 'HE14_2', 'HE14_3', 'HE14_4', 'HE14_5', 'HE14_6',
'HE14_7', 'HE15_1', 'HE15_2', 'HE15_3', 'HE15_4', 'HE15_5', 'HE15_6',
'HE15_7', 'HE1BoxF', 'HE1BoxM', 'HE24', 'HE25', 'HE26', 'HE27', 'HE27a',
'HE28', 'HE37a', 'HE37b', 'HE37c', 'HE41a', 'HE41b', 'HE42a', 'HE42b',
'HE43a', 'HE43b', 'HE43c', 'HE43d', 'HE44', 'HE44a', 'HE44b', 'HE44c',
'HE44d', 'HE45a', 'HE45b', 'HE45c', 'HE45d', 'HE46', 'HE46a', 'HE46b',
'HE47a', 'HE47b', 'HE47c', 'HE47d', 'HE48a', 'HE48b', 'HE48c', 'HE48d',
'HE51', 'HE51a', 'HE51b', 'HEF19', 'HEF34', 'HEM17b', 'HEM33', 'HEf13',
'HEf17a1', 'HEf17a2', 'HEf17a3', 'HEf17a4', 'HEf17a5', 'HEf17a6',
'HEf17b', 'HEf18a', 'HEf18b', 'HEf1a', 'HEf1b', 'HEf20a', 'HEf20b',
'HEf29a', 'HEf29b', 'HEf30', 'HEf31', 'HEf33', 'HEf36', 'HEf38',
'HEm13', 'HEm17a1', 'HEm17a2', 'HEm17a3', 'HEm17a4', 'HEm17a5',
'HEm17a6', 'HEm18a', 'HEm18b', 'HEm19', 'HEm1a', 'HEm1b', 'HEm20a',
                   'HEm20b', 'HEm29a', 'HEm29b', 'HEm30', 'HEm31', 'HEm34', 'HEm36', 'HEm38']
| gpl-3.0 |
qPCR4vir/orange3 | Orange/widgets/data/owdatasampler.py | 1 | 14226 | import sys
import math
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
import numpy as np
import sklearn.cross_validation as skl_cross_validation
from Orange.widgets import widget, gui
from Orange.widgets.settings import Setting
from Orange.data import Table
from Orange.data.sql.table import SqlTable
class OWDataSampler(widget.OWWidget):
name = "Data Sampler"
description = "Randomly draw a subset of data points " \
"from the input data set."
icon = "icons/DataSampler.svg"
priority = 100
category = "Data"
keywords = ["data", "sample"]
inputs = [("Data", Table, "set_data")]
outputs = [("Data Sample", Table, widget.Default),
("Remaining Data", Table)]
want_main_area = False
resizing_enabled = False
RandomSeed = 42
FixedProportion, FixedSize, CrossValidation, Bootstrap = range(4)
SqlTime, SqlProportion = range(2)
use_seed = Setting(False)
replacement = Setting(False)
stratify = Setting(False)
sql_dl = Setting(False)
sampling_type = Setting(FixedProportion)
sampleSizeNumber = Setting(1)
sampleSizePercentage = Setting(70)
sampleSizeSqlTime = Setting(1)
sampleSizeSqlPercentage = Setting(0.1)
number_of_folds = Setting(10)
selectedFold = Setting(1)
def __init__(self):
super().__init__()
self.data = None
self.indices = None
self.sampled_instances = self.remaining_instances = None
box = gui.vBox(self.controlArea, "Information")
self.dataInfoLabel = gui.widgetLabel(box, 'No data on input.')
self.outputInfoLabel = gui.widgetLabel(box, ' ')
self.sampling_box = gui.vBox(self.controlArea, "Sampling Type")
sampling = gui.radioButtons(self.sampling_box, self, "sampling_type",
callback=self.sampling_type_changed)
def set_sampling_type(i):
def f():
self.sampling_type = i
self.sampling_type_changed()
return f
gui.appendRadioButton(sampling, "Fixed proportion of data:")
self.sampleSizePercentageSlider = gui.hSlider(
gui.indentedBox(sampling), self,
"sampleSizePercentage",
minValue=0, maxValue=99, ticks=10, labelFormat="%d %%",
callback=set_sampling_type(self.FixedProportion),
addSpace=12)
gui.appendRadioButton(sampling, "Fixed sample size")
ibox = gui.indentedBox(sampling)
self.sampleSizeSpin = gui.spin(
ibox, self, "sampleSizeNumber", label="Instances: ",
minv=1, maxv=2 ** 31 - 1,
callback=set_sampling_type(self.FixedSize))
gui.checkBox(
ibox, self, "replacement", "Sample with replacement",
callback=set_sampling_type(self.FixedSize),
addSpace=12)
gui.appendRadioButton(sampling, "Cross validation")
form = QtGui.QFormLayout(
formAlignment=Qt.AlignLeft | Qt.AlignTop,
labelAlignment=Qt.AlignLeft,
fieldGrowthPolicy=QtGui.QFormLayout.AllNonFixedFieldsGrow)
ibox = gui.indentedBox(sampling, addSpace=True, orientation=form)
form.addRow("Number of folds:",
gui.spin(
ibox, self, "number_of_folds", 2, 100,
addToLayout=False,
callback=self.number_of_folds_changed))
self.selected_fold_spin = gui.spin(
ibox, self, "selectedFold", 1, self.number_of_folds,
addToLayout=False, callback=self.fold_changed)
form.addRow("Selected fold:", self.selected_fold_spin)
gui.appendRadioButton(sampling, "Boostrap")
self.sql_box = gui.vBox(self.controlArea, "Sampling Type")
sampling = gui.radioButtons(self.sql_box, self, "sampling_type",
callback=self.sampling_type_changed)
gui.appendRadioButton(sampling, "Time:")
ibox = gui.indentedBox(sampling)
spin = gui.spin(ibox, self, "sampleSizeSqlTime", minv=1, maxv=3600,
callback=set_sampling_type(self.SqlTime))
spin.setSuffix(" sec")
gui.appendRadioButton(sampling, "Percentage")
ibox = gui.indentedBox(sampling)
spin = gui.spin(ibox, self, "sampleSizeSqlPercentage", spinType=float,
minv=0.0001, maxv=100, step=0.1, decimals=4,
callback=set_sampling_type(self.SqlProportion))
spin.setSuffix(" %")
self.sql_box.setVisible(False)
self.options_box = gui.vBox(self.controlArea, "Options")
self.cb_seed = gui.checkBox(
self.options_box, self, "use_seed",
"Replicable (deterministic) sampling",
callback=self.settings_changed)
self.cb_stratify = gui.checkBox(
self.options_box, self, "stratify",
"Stratify sample (when possible)", callback=self.settings_changed)
self.cb_sql_dl = gui.checkBox(
self.options_box, self, "sql_dl", "Download data to local memory",
callback=self.settings_changed)
self.cb_sql_dl.setVisible(False)
gui.button(self.buttonsArea, self, "Sample Data",
callback=self.commit)
def sampling_type_changed(self):
self.settings_changed()
def number_of_folds_changed(self):
self.selected_fold_spin.setMaximum(self.number_of_folds)
self.sampling_type = self.CrossValidation
self.settings_changed()
def fold_changed(self):
# a separate callback - if we decide to cache indices
self.sampling_type = self.CrossValidation
def settings_changed(self):
self.indices = None
def set_data(self, dataset):
self.data = dataset
if dataset is not None:
sql = isinstance(dataset, SqlTable)
self.sampling_box.setVisible(not sql)
self.sql_box.setVisible(sql)
self.cb_seed.setVisible(not sql)
self.cb_stratify.setVisible(not sql)
self.cb_sql_dl.setVisible(sql)
self.dataInfoLabel.setText(
'{}{} instances in input data set.'.format(*(
('~', dataset.approx_len()) if sql else
('', len(dataset)))))
if not sql:
self.sampleSizeSpin.setMaximum(len(dataset))
self.updateindices()
else:
self.dataInfoLabel.setText('No data on input.')
self.outputInfoLabel.setText('')
self.indices = None
self.commit()
def commit(self):
if self.data is None:
sample = other = None
self.sampled_instances = self.remaining_instances = None
self.outputInfoLabel.setText("")
elif isinstance(self.data, SqlTable):
other = None
if self.sampling_type == self.SqlProportion:
sample = self.data.sample_percentage(
self.sampleSizeSqlPercentage, no_cache=True)
else:
sample = self.data.sample_time(
self.sampleSizeSqlTime, no_cache=True)
if self.sql_dl:
sample.download_data()
sample = Table(sample)
else:
if self.indices is None or not self.use_seed:
self.updateindices()
if self.indices is None:
return
if self.sampling_type in (
self.FixedProportion, self.FixedSize, self.Bootstrap):
remaining, sample = self.indices
self.outputInfoLabel.setText(
'Outputting %d instance%s.' %
(len(sample), "s" * (len(sample) != 1)))
elif self.sampling_type == self.CrossValidation:
remaining, sample = self.indices[self.selectedFold - 1]
self.outputInfoLabel.setText(
'Outputting fold %d, %d instance%s.' %
(self.selectedFold, len(sample), "s" * (len(sample) != 1))
)
sample = self.data[sample]
other = self.data[remaining]
self.sampled_instances = len(sample)
self.remaining_instances = len(other)
self.send("Data Sample", sample)
self.send("Remaining Data", other)
def updateindices(self):
err_msg = ""
repl = True
data_length = len(self.data)
num_classes = len(self.data.domain.class_var.values) \
if self.data.domain.has_discrete_class else 0
size = None
if self.sampling_type == self.FixedSize:
size = self.sampleSizeNumber
repl = self.replacement
elif self.sampling_type == self.FixedProportion:
size = np.ceil(self.sampleSizePercentage / 100 * data_length)
repl = False
elif self.sampling_type == self.CrossValidation:
if data_length < self.number_of_folds:
err_msg = "Number of folds exceeds the data size"
else:
assert self.sampling_type == self.Bootstrap
if not repl and size is not None and (data_length <= size):
err_msg = "Sample must be smaller than data"
if not repl and data_length <= num_classes and self.stratify:
err_msg = "Not enough data for stratified sampling"
self.error(0)
if err_msg:
self.error(err_msg)
self.indices = None
return
rnd = self.RandomSeed if self.use_seed else None
stratified = (self.stratify and
type(self.data) == Table and
self.data.domain.has_discrete_class)
if self.sampling_type == self.FixedSize:
self.indices = sample_random_n(
self.data, size,
stratified=stratified, replace=self.replacement,
random_state=rnd)
elif self.sampling_type == self.FixedProportion:
self.indices = sample_random_p(
self.data, self.sampleSizePercentage / 100,
stratified=stratified, random_state=rnd)
elif self.sampling_type == self.Bootstrap:
self.indices = sample_bootstrap(data_length, random_state=rnd)
else:
self.indices = sample_fold_indices(
self.data, self.number_of_folds, stratified=stratified,
random_state=rnd)
def send_report(self):
if self.sampling_type == self.FixedProportion:
tpe = "Random sample with {} % of data".format(
self.sampleSizePercentage)
elif self.sampling_type == self.FixedSize:
if self.sampleSizeNumber == 1:
tpe = "Random data instance"
else:
tpe = "Random sample with {} data instances".format(
self.sampleSizeNumber)
if self.replacement:
tpe += ", with replacement"
elif self.sampling_type == self.CrossValidation:
tpe = "Fold {} of {}-fold cross-validation".format(
self.selectedFold, self.number_of_folds)
else:
tpe = "Undefined" # should not come here at all
if self.stratify:
tpe += ", stratified (if possible)"
if self.use_seed:
tpe += ", deterministic"
items = [("Sampling type", tpe)]
if self.sampled_instances is not None:
items += [
("Input", "{} instances".format(len(self.data))),
("Sample", "{} instances".format(self.sampled_instances)),
("Remaining", "{} instances".format(self.remaining_instances)),
]
self.report_items(items)
def sample_fold_indices(table, folds=10, stratified=False, random_state=None):
"""
:param Orange.data.Table table:
:param int folds: Number of folds
:param bool stratified: Return stratified indices (if applicable).
:param Random random_state:
:rval tuple-of-arrays: A tuple of array indices one for each fold.
"""
if stratified and table.domain.has_discrete_class:
# XXX: StratifiedKFold does not support random_state
ind = skl_cross_validation.StratifiedKFold(
table.Y.ravel(), folds, random_state=random_state)
else:
ind = skl_cross_validation.KFold(
len(table), folds, shuffle=True, random_state=random_state)
return tuple(ind)
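# The helpers below each return a (remaining_indices, sample_indices) pair so
# the widget can emit both the sample and the leftover data.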
def sample_random_n(table, n, stratified=False, replace=False,
random_state=None):
if replace:
if random_state is None:
rgen = np.random
else:
rgen = np.random.mtrand.RandomState(random_state)
sample = rgen.random_integers(0, len(table) - 1, n)
o = np.ones(len(table))
o[sample] = 0
others = np.nonzero(o)[0]
return others, sample
if stratified and table.domain.has_discrete_class:
test_size = max(len(table.domain.class_var.values), n)
ind = skl_cross_validation.StratifiedShuffleSplit(
table.Y.ravel(), n_iter=1,
test_size=test_size, train_size=len(table) - test_size,
random_state=random_state)
else:
ind = skl_cross_validation.ShuffleSplit(
len(table), n_iter=1,
test_size=n, random_state=random_state)
return next(iter(ind))
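# sample_random_p converts the proportion p into an absolute count (rounded up)
# and delegates to sample_random_n, always sampling without replacement.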
def sample_random_p(table, p, stratified=False, random_state=None):
n = int(math.ceil(len(table) * p))
return sample_random_n(table, n, stratified, False, random_state)
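# sample_bootstrap draws `size` indices with replacement; the returned
# "remaining" indices are the out-of-bag rows that were never drawn.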
def sample_bootstrap(size, random_state=None):
rgen = np.random.RandomState(random_state)
sample = rgen.randint(0, size, size)
sample.sort() # not needed for the code below, just for the user
insample = np.ones((size,), dtype=np.bool)
insample[sample] = False
remaining = np.flatnonzero(insample)
return remaining, sample
def test_main():
app = QtGui.QApplication([])
data = Table("iris")
w = OWDataSampler()
w.set_data(data)
w.show()
return app.exec_()
if __name__ == "__main__":
sys.exit(test_main())
| bsd-2-clause |
asimshankar/tensorflow | tensorflow/contrib/timeseries/examples/lstm.py | 17 | 13869 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import tempfile
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
from tensorflow.contrib.timeseries.python.timeseries import state_management
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
"""A time series model-building example using an RNNCell."""
def __init__(self, num_units, num_features, exogenous_feature_columns=None,
dtype=tf.float32):
"""Initialize/configure the model object.
Note that we do not start graph building here. Rather, this object is a
configurable factory for TensorFlow graphs which are run by an Estimator.
Args:
num_units: The number of units in the model's LSTMCell.
num_features: The dimensionality of the time series (features per
timestep).
exogenous_feature_columns: A list of `tf.feature_column`s representing
features which are inputs to the model but are not predicted by
it. These must then be present for training, evaluation, and
prediction.
dtype: The floating point data type to use.
"""
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
exogenous_feature_columns=exogenous_feature_columns,
dtype=dtype)
self._num_units = num_units
# Filled in by initialize_graph()
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def initialize_graph(self, input_statistics=None):
"""Save templates for components, which can then be used repeatedly.
This method is called every time a new graph is created. It's safe to start
adding ops to the current default graph here, but the graph should be
constructed from scratch.
Args:
input_statistics: A math_utils.InputStatistics object.
"""
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
with tf.variable_scope("", use_resource=True):
# Use ResourceVariables to avoid race conditions.
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
# Create templates so we don't have to worry about variable reuse.
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=functools.partial(tf.layers.dense, units=self.num_features),
create_scope_now_=True)
def get_start_state(self):
"""Return initial state for the time series model."""
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The most recently seen exogenous features.
tf.zeros(self._get_exogenous_embedding_shape(), dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _filtering_step(self, current_times, current_values, state, predictions):
"""Update model state based on observations.
Note that we don't do much here aside from computing a loss. In this case
it's easier to update the RNN state in _prediction_step, since that covers
running the RNN both on observations (from this method) and our own
predictions. This distinction can be important for probabilistic models,
where repeatedly predicting without filtering should lead to low-confidence
predictions.
Args:
current_times: A [batch size] integer Tensor.
current_values: A [batch size, self.num_features] floating point Tensor
with new observations.
state: The model's state tuple.
predictions: The output of the previous `_prediction_step`.
Returns:
A tuple of new state and a predictions dictionary updated to include a
loss (note that we could also return other measures of goodness of fit,
although only "loss" will be optimized).
"""
state_from_time, prediction, exogenous, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
# Subtract the mean and divide by the variance of the series. Slightly
# more efficient if done for a whole window (using the normalize_features
# argument to SequentialTimeSeriesModel).
transformed_values = self._scale_data(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
# through the LSTM until the next _imputation_step.
new_state_tuple = (current_times, transformed_values,
exogenous, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
"""Advance the RNN state using a previous observation or prediction."""
_, previous_observation_or_prediction, exogenous, lstm_state = state
# Update LSTM state based on the most recent exogenous and endogenous
# features.
inputs = tf.concat([previous_observation_or_prediction, exogenous],
axis=-1)
lstm_output, new_lstm_state = self._lstm_cell_run(
inputs=inputs, state=lstm_state)
next_prediction = self._predict_from_lstm_output(lstm_output)
new_state_tuple = (current_times, next_prediction,
exogenous, new_lstm_state)
return new_state_tuple, {"mean": self._scale_back_data(next_prediction)}
def _imputation_step(self, current_times, state):
"""Advance model state across a gap."""
# Does not do anything special if we're jumping across a gap. More advanced
# models, especially probabilistic ones, would want a special case that
# depends on the gap size.
return state
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Save exogenous regressors in model state for use in _prediction_step."""
state_from_time, prediction, _, lstm_state = state
return (state_from_time, prediction,
current_exogenous_regressors, lstm_state)
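# End-to-end driver: train the custom LSTM estimator on the bundled CSV,
# evaluate it, predict 100 steps ahead with exogenous features, then export a
# SavedModel and verify it reproduces the estimator's predictions.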
def train_and_predict(
csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None,
export_directory=None):
"""Train and predict using a custom time series model."""
# Construct an Estimator from our LSTM model.
categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
# Exogenous features are not part of the loss, but can inform
# predictions. In this example the features have no extra information, but
# are included as an API example.
tf.feature_column.numeric_column(
"2d_exogenous_feature", shape=(2,)),
tf.feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
estimator = ts_estimators.TimeSeriesRegressor(
model=_LSTMModel(num_features=5, num_units=128,
exogenous_feature_columns=exogenous_feature_columns),
optimizer=tf.train.AdamOptimizer(0.001), config=estimator_config,
# Set state to be saved across windows.
state_manager=state_management.ChainingStateManager())
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5
+ ("2d_exogenous_feature",) * 2
+ ("categorical_exogenous_feature",)),
# Data types other than for `times` need to be specified if they aren't
# float32. In this case one of our exogenous features has string dtype.
column_dtypes=((tf.int64,) + (tf.float32,) * 7 + (tf.string,)))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
predict_exogenous_features = {
"2d_exogenous_feature": numpy.concatenate(
[numpy.ones([1, 100, 1]), numpy.zeros([1, 100, 1])],
axis=-1),
"categorical_exogenous_feature": numpy.array(
["strkey"] * 100)[None, :, None]}
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features=predict_exogenous_features)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, :]
predicted_mean = numpy.squeeze(numpy.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
all_times = numpy.concatenate([times, predictions["times"]], axis=0)
# Export the model in SavedModel format. We include a bit of extra boilerplate
# for "cold starting" as if we didn't have any state from the Estimator, which
# is the case when serving from a SavedModel. If Estimator output is
# available, the result of "Estimator.evaluate" can be passed directly to
# `tf.contrib.timeseries.saved_model_utils.predict_continuation` as the
# `continue_from` argument.
with tf.Graph().as_default():
filter_feature_tensors, _ = evaluation_input_fn()
with tf.train.MonitoredSession() as session:
# Fetch the series to "warm up" our state, which will allow us to make
# predictions for its future values. This is just a dictionary of times,
# values, and exogenous features mapping to numpy arrays. The use of an
# input_fn is just a convenience for the example; they can also be
# specified manually.
filter_features = session.run(filter_feature_tensors)
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_saved_model(export_directory,
input_receiver_fn)
# Warm up and predict using the SavedModel
with tf.Graph().as_default():
with tf.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
state = tf.contrib.timeseries.saved_model_utils.cold_start_filter(
signatures=signatures, session=session, features=filter_features)
saved_model_output = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=state, signatures=signatures,
session=session, steps=100,
exogenous_features=predict_exogenous_features))
# The exported model gives the same results as the Estimator.predict()
# call above.
numpy.testing.assert_allclose(
predictions["mean"],
numpy.squeeze(saved_model_output["mean"], axis=0))
return times, observed, all_times, predicted_mean
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
(observed_times, observations,
all_times, predictions) = train_and_predict()
pyplot.axvline(99, linestyle="dotted")
observed_lines = pyplot.plot(
observed_times, observations, label="Observed", color="k")
predicted_lines = pyplot.plot(
all_times, predictions, label="Predicted", color="b")
pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
loc="upper left")
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
garaud/ezhc | ezhc/_plot.py | 1 | 1922 |
import os
import pandas as pd
import datetime as dt
import uuid
from IPython.display import HTML
from _config import JS_LIBS_ONE, JS_LIBS_TWO, JS_SAVE
from scripts import JS_JSON_PARSE
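# html() renders a Highcharts/Highstock options dict into an HTML/JS snippet:
# it assigns a unique div id as the render target, serializes the options to
# JSON, wires in the require()'d JS libraries, and optionally writes a
# standalone .html file under ./saved/.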
def html(options, lib='highcharts', save=False, js_preprocess=None, callback=None):
def json_dumps(obj):
return pd.io.json.dumps(obj)
_opt = dict(options)
chart_id = str(uuid.uuid4()).replace('-', '_')
_opt['chart']['renderTo'] = chart_id
js_init = """
var options = %s;
%s
window.opt = jQuery.extend(true, {}, options);
console.log('Highcharts/Highstock options accessible as opt');
""" % (json_dumps(_opt), JS_JSON_PARSE)
if not js_preprocess:
js_preprocess = ''
if callback:
callback = ', ' + callback
else:
callback = ''
if lib=='highcharts':
js_call = 'new Highcharts.Chart(options%s);' % (callback)
elif lib=='highstock':
js_call = 'new Highcharts.StockChart(options%s);' % (callback)
html = """
<div id="%s"></div>
""" % (chart_id)
js = """<script>
require(%s, function() {
require(%s, function() {
%s
%s
%s
});
});
</script>""" % (JS_LIBS_ONE, JS_LIBS_TWO, js_init, js_preprocess, js_call)
if save==True:
if not os.path.exists('saved'):
os.makedirs('saved')
with open(os.path.join('saved', 'plot_'+dt.datetime.now().strftime('%Y%m%d_%H%M%S')+'.html'), 'w') as f:
contents = """
<script src="%s"></script>
<script src="%s"></script>
%s
""" % (JS_SAVE[0], JS_SAVE[1], html+js)
f.write(contents)
return html+js
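# plot() simply wraps the html() snippet in an IPython HTML object for inline
# notebook display. Minimal usage sketch (assumes an `options` dict shaped
# like a Highcharts config, with at least a 'chart' key):
#
#   opts = {'chart': {}, 'series': [{'data': [1, 2, 3]}]}
#   plot(opts, lib='highcharts')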
def plot(options, lib='highcharts', save=False, js_preprocess=None, callback=None):
contents = html(options, lib, save, js_preprocess, callback)
return HTML(contents)
| mit |
YinongLong/scikit-learn | sklearn/cross_decomposition/cca_.py | 151 | 3192 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value of True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find the weights u, v that maximize
    corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(CCA, self).__init__(n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
cybernet14/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 76 | 45197 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should rise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
                else:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
        if cls in (neighbors.KNeighborsClassifier,
                   neighbors.KNeighborsRegressor):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
wavelets/amazonaccess | external/greedy.py | 3 | 5002 | """ Greedy feature selection
This file is a slightly modified version of Miroslaw's code.
It generates a dataset containing all 3rd order combinations
of the original columns, then performs greedy feature selection.
Original author: Miroslaw Horbal <[email protected]>
Permission was granted by Miroslaw to publish this snippet as part of
our code.
"""
from sklearn import metrics, cross_validation, linear_model
from scipy import sparse
from itertools import combinations
from helpers import data
import numpy as np
import pandas as pd
SEED = 333
def group_data(data, degree=3, hash=hash):
new_data = []
m, n = data.shape
for indices in combinations(range(n), degree):
new_data.append([hash(tuple(v)) for v in data[:, indices]])
return np.array(new_data).T
def OneHotEncoder(data, keymap=None):
"""
OneHotEncoder takes data matrix with categorical columns and
converts it to a sparse binary matrix.
Returns sparse binary matrix and keymap mapping categories to indicies.
If a keymap is supplied on input it will be used instead of creating one
and any categories appearing in the data that are not in the keymap are
ignored
"""
if keymap is None:
keymap = []
for col in data.T:
uniques = set(list(col))
keymap.append(dict((key, i) for i, key in enumerate(uniques)))
total_pts = data.shape[0]
outdat = []
for i, col in enumerate(data.T):
km = keymap[i]
num_labels = len(km)
spmat = sparse.lil_matrix((total_pts, num_labels))
for j, val in enumerate(col):
if val in km:
spmat[j, km[val]] = 1
outdat.append(spmat)
outdat = sparse.hstack(outdat).tocsr()
return outdat, keymap
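# Hedged usage sketch (added for illustration; not part of the original
# script): encodes a tiny categorical matrix, then reuses the returned keymap
# so held-out rows map onto the same binary columns. The toy values are
# assumptions invented for this example.
def _one_hot_encoder_usage_sketch():
    train_cats = np.array([[1, 10], [2, 10], [1, 20]])
    test_cats = np.array([[2, 20]])
    train_enc, keymap = OneHotEncoder(train_cats)
    test_enc, _ = OneHotEncoder(test_cats, keymap=keymap)
    # train_enc is a sparse CSR matrix of shape (3, 4): two binary columns
    # per original categorical column.
    return train_enc, test_enc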
def cv_loop(X, y, model, N):
mean_auc = 0.
for i in range(N):
X_train, X_cv, y_train, y_cv = cross_validation.train_test_split(
X, y, test_size=.20,
random_state=i*SEED)
model.fit(X_train, y_train)
preds = model.predict_proba(X_cv)[:, 1]
auc = metrics.auc_score(y_cv, preds)
print "AUC (fold %d/%d): %f" % (i + 1, N, auc)
mean_auc += auc
return mean_auc/N
def create_features(train='data/train.csv', test='data/test.csv'):
print "Reading dataset..."
train_data = pd.read_csv(train)
test_data = pd.read_csv(test)
all_data = np.vstack((train_data.ix[:, 1:-1], test_data.ix[:, 1:-1]))
num_train = np.shape(train_data)[0]
# Transform data
print "Transforming data..."
dp = group_data(all_data, degree=2)
dt = group_data(all_data, degree=3)
y = np.array(train_data.ACTION)
X = all_data[:num_train]
X_2 = dp[:num_train]
X_3 = dt[:num_train]
X_test = all_data[num_train:]
X_test_2 = dp[num_train:]
X_test_3 = dt[num_train:]
X_train_all = np.hstack((X, X_2, X_3))
X_test_all = np.hstack((X_test, X_test_2, X_test_3))
num_features = X_train_all.shape[1]
model = linear_model.LogisticRegression()
# Xts holds one hot encodings for each individual feature in memory
# speeding up feature selection
Xts = [OneHotEncoder(X_train_all[:, [i]])[0] for i in range(num_features)]
print "Performing greedy feature selection..."
score_hist = []
N = 10
good_features_list = [
[0, 8, 9, 10, 19, 34, 36, 37, 38, 41, 42, 43, 47, 53, 55,
60, 61, 63, 64, 67, 69, 71, 75, 81, 82, 85],
[0, 1, 7, 8, 9, 10, 36, 37, 38, 41, 42, 43, 47, 51, 53,
56, 60, 61, 63, 64, 66, 67, 69, 71, 75, 79, 85, 91],
[0, 7, 9, 24, 36, 37, 41, 42, 47, 53, 61, 63, 64, 67, 69, 71, 75, 85],
[0, 7, 9, 20, 36, 37, 38, 41, 42, 45, 47,
53, 60, 63, 64, 67, 69, 71, 81, 85, 86]
]
# Greedy feature selection loop
if not good_features_list:
good_features = set([])
while len(score_hist) < 2 or score_hist[-1][0] > score_hist[-2][0]:
scores = []
for f in range(len(Xts)):
if f not in good_features:
feats = list(good_features) + [f]
Xt = sparse.hstack([Xts[j] for j in feats]).tocsr()
score = cv_loop(Xt, y, model, N)
scores.append((score, f))
print "Feature: %i Mean AUC: %f" % (f, score)
good_features.add(sorted(scores)[-1][1])
score_hist.append(sorted(scores)[-1])
print "Current features: %s" % sorted(list(good_features))
# Remove last added feature from good_features
good_features.remove(score_hist[-1][1])
good_features = sorted(list(good_features))
for i, good_features in enumerate(good_features_list):
suffix = str(i + 1) if i else ''
Xt = np.vstack((X_train_all[:, good_features],
X_test_all[:, good_features]))
X_train = Xt[:num_train]
X_test = Xt[num_train:]
data.save_dataset("greedy%s" % suffix, X_train, X_test)
| mit |
NelisVerhoef/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
MarcSpitz/ldebroux_kjadin_masters-thesis_2014 | src/impact.py | 1 | 7545 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: Debroux Léonard <[email protected]>
# @author: Jadin Kevin <[email protected]>
import sys, os
from utils import Utils
from setup import Setup
import logging as log
from pylab import boxplot, savefig, figure, tight_layout
from matplotlib import gridspec
from abstracttest import AbstractTest
class ImpactTest(AbstractTest):
""" Impact test
Measures the impact of improvements on the tree
Plots the distribution of improvements values,
the proportion of successful improvements and the impact on the tree
"""
def impactedNodes(self, edgeList1, edgeList2):
""" returns the set of impacted nodes from edgeList1 to edgeList2 """
edgeSet1 = set(edgeList1)
edgeSet2 = set(edgeList2)
union = edgeSet1 | edgeSet2
intersection = edgeSet1 & edgeSet2
impactedEdges = union - intersection
impactedNodes = set()
for e in impactedEdges:
(n1, n2) = e
impactedNodes.add(n1)
impactedNodes.add(n2)
return impactedNodes
def addValueToDictOfList(self, dictionary, key, value):
if key in dictionary:
dictionary[key].append(value)
else:
dictionary[key] = [value]
def writeImpact(self, edgesBefore, edgesAfter, weightBefore, weightAfter, improveNb, nbOfImprove, f):
writeNewline = self.writeNewline
writeNewline('improve %s/%s' % (improveNb, nbOfImprove), f)
writeNewline('edges_before_improve: %s' % edgesBefore, f)
writeNewline('weight_before_improve: %s' % weightBefore, f)
writeNewline('edges_after_improve: %s' % edgesAfter, f)
writeNewline('weight_after_improve: %s' % weightAfter, f)
writeNewline('', f)
def run(self):
timestr = Utils.getTimeString()
log.info("test started at: %s" % timestr)
k = 1 # unused
custom_setupDicts = self.setupDicts
dataSets = self.dataSets
NGdict = self.NGdict
root = self.root
tests = self.tests
config_file = self.config_file
shortest_paths_file = self.shortest_paths_file_name
impactDict = {}
nodesInGraph = len(NGdict[k].nodes())
prependstr = "_".join([config_file, shortest_paths_file])
filename = "%s_impact.txt" % (prependstr)
log.info("writing to file %s" % filename)
filename = os.path.join(self.working_directory, filename)
f = open(filename,'w')
writeNewline = self.writeNewline # shortcut
writeNewline(filename, f)
log.debug("nodesInGraph %s" % nodesInGraph)
for dataSet in dataSets:
dataSetIndex = dataSets.index(dataSet)
for s in custom_setupDicts:
setupIndex = custom_setupDicts.index(s)
self.log_progression(dataSetIndex+1, len(dataSets), setupIndex+1, len(custom_setupDicts), self.testname)
Setup.reset_setup() # start from default
Setup.configure(s)
# improves will be added to dataSet while keeping dataSet intact
events = self.addImproveToDataSet(dataSet)
if log.getLogger(__name__).isEnabledFor(log.INFO): # logs only printed if asked for
self.print_logs()
for idx in range(tests):
log.debug('test %s' % idx)
_, _, _, stateOfImprove, _, _ = Utils.run_setup(NGdict[k], root, events)
self.writeDataHeader(f, dataSetIndex, setupIndex, idx)
i = 1
nbOfImprove = len(stateOfImprove)
for (edgesBefore, edgesAfter, weightBefore, weightAfter) in stateOfImprove:
self.writeImpact(edgesBefore, edgesAfter, weightBefore, weightAfter, i, nbOfImprove, f)
i += 1
impactedNodesAtStep = self.impactedNodes(edgesBefore, edgesAfter)
impact = len(impactedNodesAtStep)/float(nodesInGraph)
improve = (weightBefore - weightAfter) / float(weightBefore)
self.addValueToDictOfList(impactDict, round(improve*100, 2), round(impact*100, 2))
f.close()
keys = sorted(impactDict.keys())
zeroMeasures = []
if 0 in keys:
zeroMeasures = impactDict[0]
keys.remove(0)
minKey = min(keys)
maxKey = max(keys)
nbBuckets = 8
# create bins
bucketWidth = (maxKey-minKey)/float(nbBuckets)
bins = [minKey]
x = minKey
binValue = round(x, 3)
while binValue < maxKey:
x += bucketWidth
binValue = round(x, 3)
bins.append(binValue)
#create locations at the middle of the bins
locations = []
for x in bins[:-1]:
loc = x+bucketWidth/2.0
locations.append(round(loc, 1))
measures = []
impacts = []
for k in keys:
l = impactDict[k]
for v in l:
measures.append(k)
# building the buckets for the boxplot
binIndex = 1
bucketImpacts = []
while keys:
for k in sorted(keys): # need to sort
if k <= bins[binIndex]:
bucketImpacts.extend(impactDict[k])
keys.remove(k)
else:
impacts.append(bucketImpacts)
bucketImpacts = []
binIndex += 1
break
impacts.append(bucketImpacts)
# plotting
fig = figure()
gs = gridspec.GridSpec(1, 2, width_ratios=[11, 1])
ax1 = fig.add_subplot(gs[0])
ax1.hist(measures, bins, color='lightgreen')
ax1.set_xlabel('Tree improvement [%]')
ax1.set_ylabel('Nb measures')
ax1.yaxis.set_label_position("right")
ax2 = ax1.twinx()
ax1.yaxis.tick_right()
ax1.set_xlim(left=bins[0])
ax2.yaxis.tick_left()
bp = ax2.boxplot(impacts, positions=locations, widths=bucketWidth/2.5, patch_artist=True)
ax2.set_ylabel('Impacted nodes [%]')
ax2.yaxis.set_label_position("left")
c = self.colors
for box in bp['boxes']:
# change outline
box.set( linewidth=0 )
# change fill color
box.set( facecolor = c['orange'] )
# change color and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color=c['gray'], linewidth=2)
# change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color=c['black'], linewidth=2)
# change color and linewidth of the medians
for median in bp['medians']:
median.set(color=c['black'], linewidth=5)
for label in ax1.xaxis.get_ticklabels():
label.set_rotation(60)
ax3 = fig.add_subplot(gs[1])
measures = [0]*len(measures)
if measures and zeroMeasures:
data = [measures, zeroMeasures]
colors=[c['green'], c['red']]
labels=['Used', 'Unused']
elif not zeroMeasures:
data = measures
colors=c['green']
labels='Used'
elif not measures:
data = zeroMeasures
colors=c['red']
labels='Unused'
else:
raise Exception('I have no data :(')
lines = ax3.hist(data, 1, normed=0, histtype='bar', stacked=True, color=colors, label=labels)
ax3.xaxis.set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.spines['bottom'].set_visible(False)
ax3.xaxis.set_ticks_position('bottom')
ax3.yaxis.set_ticks_position('left')
prependstr = "_".join([config_file, shortest_paths_file])
filename = "%s_improve.eps" % (prependstr)
filename = os.path.join(self.working_directory, filename)
log.info("writing to file %s" % filename)
tight_layout()
savefig(filename)
def main(argv):
impact = ImpactTest()
impact.run()
if __name__ == "__main__":
main(sys.argv)
| gpl-2.0 |
xiaoxq/apollo | modules/tools/routing/road_show.py | 3 | 4618 | #!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Show road."""
import math
import sys
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import modules.tools.common.proto_utils as proto_utils
import modules.tools.routing.util as util
g_color = [
'navy', 'c', 'cornflowerblue', 'gold', 'darkorange', 'darkviolet',
'aquamarine', 'firebrick', 'limegreen'
]
def draw_line(line_segment, color):
"""
:param line_segment:
:return: none
"""
px, py = proto_utils.flatten(line_segment.point, ['x', 'y'])
px, py = downsample_array(px), downsample_array(py)
plt.gca().plot(px, py, lw=10, alpha=0.8, color=color)
return px[len(px) // 2], py[len(py) // 2]
def draw_arc(arc):
"""
:param arc: proto obj
:return: none
"""
xy = (arc.center.x, arc.center.y)
start = 0
end = 0
if arc.start_angle < arc.end_angle:
start = arc.start_angle / math.pi * 180
end = arc.end_angle / math.pi * 180
else:
end = arc.start_angle / math.pi * 180
start = arc.end_angle / math.pi * 180
pac = mpatches.Arc(
xy, arc.radius * 2, arc.radius * 2, angle=0, theta1=start, theta2=end)
plt.gca().add_patch(pac)
def downsample_array(array):
"""down sample given array"""
skip = 5
result = array[::skip]
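    # Always keep the last point so the end of the polyline is preserved
    # (this may duplicate the final sampled point).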
result.append(array[-1])
return result
def draw_boundary(line_segment):
"""
:param line_segment:
:return:
"""
px, py = proto_utils.flatten(line_segment.point, ['x', 'y'])
px, py = downsample_array(px), downsample_array(py)
plt.gca().plot(px, py, 'k')
def draw_id(x, y, id_string):
"""Draw id_string on (x, y)"""
plt.annotate(
id_string,
xy=(x, y),
xytext=(40, -40),
textcoords='offset points',
ha='right',
va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='green', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
def get_road_index_of_lane(lane_id, road_lane_set):
"""Get road index of lane"""
for i, lane_set in enumerate(road_lane_set):
if lane_id in lane_set:
return i
return -1
def draw_map(drivemap):
""" draw map from mapfile"""
print('Map info:')
print('\tVersion:\t', end=' ')
print(drivemap.header.version)
print('\tDate:\t', end=' ')
print(drivemap.header.date)
print('\tDistrict:\t', end=' ')
print(drivemap.header.district)
road_lane_set = []
for road in drivemap.road:
lanes = []
for sec in road.section:
lanes.extend(proto_utils.flatten(sec.lane_id, 'id'))
road_lane_set.append(lanes)
for lane in drivemap.lane:
for curve in lane.central_curve.segment:
if curve.HasField('line_segment'):
road_idx = get_road_index_of_lane(lane.id.id, road_lane_set)
if road_idx == -1:
print('Failed to get road index of lane')
sys.exit(-1)
center_x, center_y = draw_line(curve.line_segment,
g_color[road_idx % len(g_color)])
draw_id(center_x, center_y, str(road_idx))
# break
# if curve.HasField('arc'):
# draw_arc(curve.arc)
for curve in lane.left_boundary.curve.segment:
if curve.HasField('line_segment'):
draw_boundary(curve.line_segment)
for curve in lane.right_boundary.curve.segment:
if curve.HasField('line_segment'):
draw_boundary(curve.line_segment)
# break
return drivemap
if __name__ == "__main__":
print("Reading map data")
map_dir = util.get_map_dir(sys.argv)
base_map = util.get_mapdata(map_dir)
print("Done reading map data")
plt.subplots()
draw_map(base_map)
plt.axis('equal')
plt.show()
| apache-2.0 |
Garrett-R/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
nsauder/treeano | examples/prelu/mnist_cnn.py | 3 | 3553 | from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import itertools
import numpy as np
import sklearn.datasets
import sklearn.cross_validation
import sklearn.metrics
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
import canopy
from treeano.sandbox.nodes import prelu
fX = theano.config.floatX
# ############################### prepare data ###############################
mnist = sklearn.datasets.fetch_mldata('MNIST original')
# theano has a constant float type that it uses (float32 for GPU)
# also rescaling to [0, 1] instead of [0, 255]
X = mnist['data'].reshape(-1, 1, 28, 28).astype(fX) / 255.0
y = mnist['target'].astype("int32")
X_train, X_valid, y_train, y_valid = sklearn.cross_validation.train_test_split(
X, y, random_state=42)
in_train = {"x": X_train, "y": y_train}
in_valid = {"x": X_valid, "y": y_valid}
# ############################## prepare model ##############################
model = tn.HyperparameterNode(
"model",
tn.SequentialNode(
"seq",
[tn.InputNode("x", shape=(None, 1, 28, 28)),
tn.Conv2DWithBiasNode("conv1"),
prelu.PReLUNode("prelu1"),
tn.MaxPool2DNode("mp1"),
tn.Conv2DWithBiasNode("conv2"),
prelu.PReLUNode("prelu2"),
tn.MaxPool2DNode("mp2"),
tn.DenseNode("fc1"),
prelu.PReLUNode("prelu3"),
tn.DropoutNode("do1"),
tn.DenseNode("fc2", num_units=10),
tn.SoftmaxNode("pred"),
]),
num_filters=32,
filter_size=(5, 5),
pool_size=(2, 2),
num_units=256,
dropout_probability=0.5,
inits=[treeano.inits.XavierNormalInit()],
)
with_updates = tn.HyperparameterNode(
"with_updates",
tn.AdamNode(
"adam",
{"subtree": model,
"cost": tn.TotalCostNode("cost", {
"pred": tn.ReferenceNode("pred_ref", reference="model"),
"target": tn.InputNode("y", shape=(None,), dtype="int32")},
)}),
cost_function=treeano.utils.categorical_crossentropy_i32,
)
network = with_updates.network()
network.build() # build eagerly to share weights
BATCH_SIZE = 500
valid_fn = canopy.handled_fn(
network,
[canopy.handlers.time_call(key="valid_time"),
canopy.handlers.override_hyperparameters(dropout_probability=0),
canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
variables=["x", "y"])],
{"x": "x", "y": "y"},
{"cost": "cost", "pred": "pred"})
def validate(in_dict, results_dict):
valid_out = valid_fn(in_valid)
probabilities = valid_out["pred"]
predicted_classes = np.argmax(probabilities, axis=1)
results_dict["valid_cost"] = valid_out["cost"]
results_dict["valid_time"] = valid_out["valid_time"]
results_dict["valid_accuracy"] = sklearn.metrics.accuracy_score(
y_valid, predicted_classes)
train_fn = canopy.handled_fn(
network,
[canopy.handlers.time_call(key="total_time"),
canopy.handlers.call_after_every(1, validate),
canopy.handlers.time_call(key="train_time"),
canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
variables=["x", "y"])],
{"x": "x", "y": "y"},
{"train_cost": "cost"},
include_updates=True)
# ################################# training #################################
print("Starting training...")
canopy.evaluate_until(fn=train_fn,
gen=itertools.repeat(in_train),
max_iters=25)
| apache-2.0 |
voxlol/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatically correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of a SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
phobson/statsmodels | statsmodels/datasets/template_data.py | 31 | 1680 | #! /usr/bin/env python
"""Name of dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """E.g., This is public domain."""
TITLE = """Title of the dataset"""
SOURCE = """
This section should provide a link to the original dataset if possible and
attribution and correspondance information for the dataset's original author
if so desired.
"""
DESCRSHORT = """A short description."""
DESCRLONG = """A longer description of the dataset."""
#suggested notes
NOTE = """
::
Number of observations:
Number of variables:
Variable name definitions:
Any other useful information that does not fit into the above categories.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/DatasetName.csv', 'rb'),
delimiter=",", names=True, dtype=float)
return data
| bsd-3-clause |
cgpotts/cs224u | torch_rnn_classifier.py | 1 | 17213 | import numpy as np
from operator import itemgetter
import torch
import torch.nn as nn
import torch.utils.data
from torch_model_base import TorchModelBase
import utils
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2021"
class TorchRNNDataset(torch.utils.data.Dataset):
def __init__(self, sequences, seq_lengths, y=None):
"""
Dataset class for RNN classifiers. The heavy-lifting is done by
`collate_fn`, which handles the padding and packing necessary to
efficiently process variable length sequences.
Parameters
----------
sequences : list of `torch.LongTensor`, `len(n_examples)`
seq_lengths : torch.LongTensor, shape `(n_examples, )`
y : None or torch.LongTensor, shape `(n_examples, )`
If None, then we are in prediction mode. Otherwise, these are
indices into the list of classes.
"""
assert len(sequences) == len(seq_lengths)
self.sequences = sequences
self.seq_lengths = seq_lengths
if y is not None:
assert len(sequences) == len(y)
self.y = y
@staticmethod
def collate_fn(batch):
"""
Format a batch of examples for use in both training and prediction.
Parameters
----------
batch : tuple of length 2 (prediction) or 3 (training)
The first element is the list of input sequences. The
second is the list of lengths for those sequences. The third,
where present, is the list of labels.
Returns
-------
X : torch.Tensor, shape `(batch_size, max_batch_length)`
As padded by `torch.nn.utils.rnn.pad_sequence.
seq_lengths : torch.LongTensor, shape `(batch_size, )`
y : torch.LongTensor, shape `(batch_size, )`
Only for training. In the case where `y` cannot be turned into
a Tensor, we assume it is because it is a list of variable
length sequences and to use `torch.nn.utils.rnn.pad_sequence`.
The hope is that this will accomodate sequence prediction.
"""
batch_elements = list(zip(*batch))
X = batch_elements[0]
seq_lengths = batch_elements[1]
X = torch.nn.utils.rnn.pad_sequence(X, batch_first=True)
seq_lengths = torch.tensor(seq_lengths)
if len(batch_elements) == 3:
y = batch_elements[2]
# We can try to accommodate the case where `y` is a sequence
# loss with potentially different lengths by resorting to
# padding if creating a tensor is not possible:
try:
y = torch.tensor(y)
except ValueError:
y = torch.nn.utils.rnn.pad_sequence(y, batch_first=True)
return X, seq_lengths, y
else:
return X, seq_lengths
def __len__(self):
return len(self.sequences)
def __getitem__(self, idx):
if self.y is not None:
return self.sequences[idx], self.seq_lengths[idx], self.y[idx]
else:
return self.sequences[idx], self.seq_lengths[idx]
class TorchRNNModel(nn.Module):
def __init__(self,
vocab_size,
embed_dim=50,
embedding=None,
use_embedding=True,
rnn_cell_class=nn.LSTM,
hidden_dim=50,
bidirectional=False,
freeze_embedding=False):
"""
Defines the core RNN computation graph. For an explanation of the
parameters, see `TorchRNNClassifierModel`. This class handles just
the RNN components of the overall classifier model.
`TorchRNNClassifierModel` uses the output states to create a
classifier.
"""
super().__init__()
self.vocab_size = vocab_size
self.use_embedding = use_embedding
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.bidirectional = bidirectional
self.freeze_embedding = freeze_embedding
# Graph
if self.use_embedding:
self.embedding = self._define_embedding(
embedding, vocab_size, self.embed_dim, self.freeze_embedding)
self.embed_dim = self.embedding.embedding_dim
self.rnn = rnn_cell_class(
input_size=self.embed_dim,
hidden_size=hidden_dim,
batch_first=True,
bidirectional=bidirectional)
def forward(self, X, seq_lengths):
if self.use_embedding:
X = self.embedding(X)
embs = torch.nn.utils.rnn.pack_padded_sequence(
X,
batch_first=True,
lengths=seq_lengths.cpu(),
enforce_sorted=False)
outputs, state = self.rnn(embs)
return outputs, state
@staticmethod
def _define_embedding(embedding, vocab_size, embed_dim, freeze_embedding):
if embedding is None:
emb = nn.Embedding(vocab_size, embed_dim)
emb.weight.requires_grad = not freeze_embedding
return emb
elif isinstance(embedding, np.ndarray):
embedding = torch.FloatTensor(embedding)
return nn.Embedding.from_pretrained(
embedding, freeze=freeze_embedding)
else:
return embedding
class TorchRNNClassifierModel(nn.Module):
def __init__(self, rnn, output_dim, classifier_activation):
"""
Defines the core computation graph for `TorchRNNClassifier`. This
involves using the outputs of a `TorchRNNModel` instance to
build a softmax classifier:
h[t] = rnn(x[t], h[t-1])
h = f(h[-1].dot(W_hy) + b_h)
y = softmax(hW + b_y)
This class uses its `rnn` parameter to compute each `h[1]`, and
then it adds the classifier parameters that use `h[-1]` as inputs.
Where `bidirectional=True`, `h[-1]` is `torch.cat([h[0], h[-1])`.
"""
super().__init__()
self.rnn = rnn
self.output_dim = output_dim
self.hidden_dim = self.rnn.hidden_dim
if self.rnn.bidirectional:
self.classifier_dim = self.hidden_dim * 2
else:
self.classifier_dim = self.hidden_dim
self.hidden_layer = nn.Linear(
self.classifier_dim, self.hidden_dim)
self.classifier_activation = classifier_activation
self.classifier_layer = nn.Linear(
self.hidden_dim, self.output_dim)
def forward(self, X, seq_lengths):
outputs, state = self.rnn(X, seq_lengths)
state = self.get_batch_final_states(state)
if self.rnn.bidirectional:
state = torch.cat((state[0], state[1]), dim=1)
h = self.classifier_activation(self.hidden_layer(state))
logits = self.classifier_layer(h)
return logits
def get_batch_final_states(self, state):
if self.rnn.rnn.__class__.__name__ == 'LSTM':
return state[0].squeeze(0)
else:
return state.squeeze(0)
class TorchRNNClassifier(TorchModelBase):
def __init__(self,
vocab,
hidden_dim=50,
embedding=None,
use_embedding=True,
embed_dim=50,
rnn_cell_class=nn.LSTM,
bidirectional=False,
freeze_embedding=False,
classifier_activation=nn.ReLU(),
**base_kwargs):
"""
RNN-based Recurrent Neural Network for classification problems.
The network will work for any kind of classification task.
Parameters
----------
vocab : list of str
This should be the vocabulary. It needs to be aligned with
`embedding` in the sense that the ith element of vocab
should be represented by the ith row of `embedding`. Ignored
if `use_embedding=False`.
embedding : np.array or None
Each row represents a word in `vocab`, as described above.
use_embedding : bool
If True, then incoming examples are presumed to be lists of
elements of the vocabulary. If False, then they are presumed
to be lists of vectors. In this case, the `embedding` and
`embed_dim` arguments are ignored, since no embedding is needed
and `embed_dim` is set by the nature of the incoming vectors.
embed_dim : int
Dimensionality for the initial embeddings. This is ignored
if `embedding` is not None, as a specified value there
determines this value. Also ignored if `use_embedding=False`.
rnn_cell_class : class for PyTorch recurrent layer
Should be just the class name, not an instance of the class.
hidden_dim : int
Dimensionality of the hidden layer in the RNN.
bidirectional : bool
If True, then the final hidden states from passes in both
directions are used.
freeze_embedding : bool
If True, the embedding will be updated during training. If
False, the embedding will be frozen. This parameter applies
to both randomly initialized and pretrained embeddings.
classifier_activation : nn.Module
The non-activation function used by the network for the
hidden layer of the classifier.
**base_kwargs
For details, see `torch_model_base.py`.
Attributes
----------
loss: nn.CrossEntropyLoss(reduction="mean")
self.params: list
Extends TorchModelBase.params with names for all of the
arguments for this class to support tuning of these values
using `sklearn.model_selection` tools.
"""
self.vocab = vocab
self.hidden_dim = hidden_dim
self.embedding = embedding
self.use_embedding = use_embedding
self.embed_dim = embed_dim
self.rnn_cell_class = rnn_cell_class
self.bidirectional = bidirectional
self.freeze_embedding = freeze_embedding
self.classifier_activation = classifier_activation
super().__init__(**base_kwargs)
self.params += [
'hidden_dim',
'embed_dim',
'embedding',
'use_embedding',
'rnn_cell_class',
'bidirectional',
'freeze_embedding',
'classifier_activation']
self.loss = nn.CrossEntropyLoss(reduction="mean")
def build_graph(self):
"""
The core computation graph. This is called by `fit`, which sets
the `self.model` attribute.
Returns
-------
TorchRNNModel
"""
rnn = TorchRNNModel(
vocab_size=len(self.vocab),
embedding=self.embedding,
use_embedding=self.use_embedding,
embed_dim=self.embed_dim,
rnn_cell_class=self.rnn_cell_class,
hidden_dim=self.hidden_dim,
bidirectional=self.bidirectional,
freeze_embedding=self.freeze_embedding)
model = TorchRNNClassifierModel(
rnn=rnn,
output_dim=self.n_classes_,
classifier_activation=self.classifier_activation)
self.embed_dim = rnn.embed_dim
return model
def build_dataset(self, X, y=None):
"""
Format data for training and prediction.
Parameters
----------
X : list of lists
The raw sequences. The lists are expected to contain
elements of `self.vocab`. This method converts them to
indices for PyTorch.
y : list or None
The raw labels. This method turns them into indices for
PyTorch processing. If None, then we are in prediction
mode.
Returns
-------
TorchRNNDataset
"""
X, seq_lengths = self._prepare_sequences(X)
if y is None:
return TorchRNNDataset(X, seq_lengths)
else:
self.classes_ = sorted(set(y))
self.n_classes_ = len(self.classes_)
class2index = dict(zip(self.classes_, range(self.n_classes_)))
y = [class2index[label] for label in y]
return TorchRNNDataset(X, seq_lengths, y)
def _prepare_sequences(self, X):
"""
Internal method for turning X into a list of indices into
`self.vocab` and calculating the true lengths of the elements
in `X`.
Parameters
----------
X : list of lists, `len(n_examples)`
Returns
-------
new_X : list of lists, `len(n_examples)`
seq_lengths : torch.LongTensor, shape `(n_examples, )`
"""
if self.use_embedding:
new_X = []
seq_lengths = []
index = dict(zip(self.vocab, range(len(self.vocab))))
unk_index = index['$UNK']
for ex in X:
seq = [index.get(w, unk_index) for w in ex]
seq = torch.tensor(seq)
new_X.append(seq)
seq_lengths.append(len(seq))
else:
new_X = [torch.FloatTensor(ex) for ex in X]
seq_lengths = [len(ex) for ex in X]
self.embed_dim = X[0][0].shape[0]
seq_lengths = torch.tensor(seq_lengths)
return new_X, seq_lengths
def score(self, X, y, device=None):
"""
Uses macro-F1 as the score function. Note: this departs from
`sklearn`, where classifiers use accuracy as their scoring
function. Using macro-F1 is more consistent with our course.
This function can be used to evaluate models, but its primary
use is in cross-validation and hyperparameter tuning.
Parameters
----------
X: np.array, shape `(n_examples, n_features)`
y: iterable, shape `len(n_examples)`
These can be the raw labels. They will converted internally
as needed. See `build_dataset`.
device: str or None
Allows the user to temporarily change the device used
during prediction. This is useful if predictions require a
lot of memory and so are better done on the CPU. After
prediction is done, the model is returned to `self.device`.
Returns
-------
float
"""
preds = self.predict(X, device=device)
return utils.safe_macro_f1(y, preds)
def predict_proba(self, X, device=None):
"""
Predicted probabilities for the examples in `X`.
Parameters
----------
X : np.array, shape `(n_examples, n_features)`
device: str or None
Allows the user to temporarily change the device used
during prediction. This is useful if predictions require a
lot of memory and so are better done on the CPU. After
prediction is done, the model is returned to `self.device`.
Returns
-------
np.array, shape `(len(X), self.n_classes_)`
Each row of this matrix will sum to 1.0.
"""
preds = self._predict(X, device=device)
probs = torch.softmax(preds, dim=1).cpu().numpy()
return probs
def predict(self, X, device=None):
"""
Predicted labels for the examples in `X`. These are converted
from the integers that PyTorch needs back to their original
values in `self.classes_`.
Parameters
----------
X : np.array, shape `(n_examples, n_features)`
device: str or None
Allows the user to temporarily change the device used
during prediction. This is useful if predictions require a
lot of memory and so are better done on the CPU. After
prediction is done, the model is returned to `self.device`.
Returns
-------
list, length len(X)
"""
probs = self.predict_proba(X, device=device)
return [self.classes_[i] for i in probs.argmax(axis=1)]
def simple_example():
utils.fix_random_seeds()
vocab = ['a', 'b', '$UNK']
# No b before an a
train = [
[list('ab'), 'good'],
[list('aab'), 'good'],
[list('abb'), 'good'],
[list('aabb'), 'good'],
[list('ba'), 'bad'],
[list('baa'), 'bad'],
[list('bba'), 'bad'],
[list('bbaa'), 'bad'],
[list('aba'), 'bad']]
test = [
[list('baaa'), 'bad'],
[list('abaa'), 'bad'],
[list('bbaa'), 'bad'],
[list('aaab'), 'good'],
[list('aaabb'), 'good']]
X_train, y_train = zip(*train)
X_test, y_test = zip(*test)
mod = TorchRNNClassifier(vocab)
print(mod)
mod.fit(X_train, y_train)
preds = mod.predict(X_test)
print("\nPredictions:")
for ex, pred, gold in zip(X_test, preds, y_test):
score = "correct" if pred == gold else "incorrect"
print("{0:>6} - predicted: {1:>4}; actual: {2:>4} - {3}".format(
"".join(ex), pred, gold, score))
return mod.score(X_test, y_test)
if __name__ == '__main__':
simple_example()
| apache-2.0 |
googleinterns/intern2020_cocal | uncertainty/plots/plot_temp_trend.py | 1 | 4864 | import os, sys
import numpy as np
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import tensorflow as tf
import data
import model
from uncertainty import ECE
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='plot ECE trend')
## meta args
parser.add_argument('--src', type=str, nargs='*', help='list of sources') ##TODO: how to restrict possible inputs?
parser.add_argument('--tar', type=str, default='SVHN')
parser.add_argument('--aug', action='store_true')
parser.add_argument('--fontsize', type=int, default=15)
args = parser.parse_args()
fontsize = args.fontsize
aug = args.aug
src_short = ''
for n in args.src:
src_short += n[0].lower()
fig_root = 'snapshots/figs/src_%s_tar_%s%s'%(src_short, args.tar.lower(), '_aug' if aug else '')
os.makedirs(fig_root, exist_ok=True)
if aug:
T_rng = np.arange(0.1, 10.0+0.1, 0.1)
aug_params = [[
('intensity_flip', {}),
('intensity_scaling', {'min': -1.5, 'max': 1.5}),
('intensity_offset', {'min': -0.5, 'max': 0.5}),
('affine', {'std': 0.1}),
('translation', {'x_max': 2.0, 'y_max': 2.0}),
('gaussian', {'std': 0.1}),
]]
else:
T_rng = np.concatenate((np.array([1.0]), np.arange(10.0, 500.0+10.0, 10.0)))
aug_params = [None]
print(T_rng)
## init a loader
ds_src = data.MultiSourceDataset(args.src, aug_params, batch_size=100, val_shuffle=True, val_aug=True, test_aug=True)
ds_tar = getattr(data, 'SVHN')(batch_size=100, val_shuffle=True)
## load a trained model
mdl = getattr(model, 'ResNet18')(num_class=10, activation='relu', input_shape=(32, 32, 3)) ##TODO: generalize
mdl_cal = model.TempCls(mdl)
mdl_cal.load_weights('snapshots/%s2svhn%s_ResNet18/model_params_cal_best'%(src_short, '_aug' if aug else ''))
## error/ece
Ts, eces_src, eces_tar = [], [], []
for T in T_rng:
mdl_cal.T = tf.constant(T, dtype=tf.float32)
Ts.append(T)
for ds_type in ['src', 'tar']:
dsld = ds_tar.test if ds_type == 'tar' else ds_src.test
## target
ph_list, yh_list, y_list = [], [], []
for x, y in dsld:
logits = mdl_cal(x, training=False)['logits']
ph = tf.math.reduce_max(tf.nn.softmax(logits, -1), -1)
yh = tf.math.argmax(logits, -1)
ph_list.append(ph.numpy())
yh_list.append(yh.numpy())
y_list.append(y.numpy())
ece = ECE(np.concatenate(ph_list), np.concatenate(yh_list), np.concatenate(y_list),
rel_diag_fn=os.path.join(fig_root, 'rel_diag_%s%s_T_%f'%(src_short, '_aug' if aug else '', T)))
error = np.mean(np.concatenate(y_list) != np.concatenate(yh_list))
if ds_type == 'src':
ece_src = ece
error_src = error
eces_src.append(ece_src*100.0)
## draw the best reliability diagram
if T == Ts[np.argmin(np.array(eces_src))]:
ECE(np.concatenate(ph_list), np.concatenate(yh_list), np.concatenate(y_list),
rel_diag_fn=os.path.join(fig_root, 'rel_diag_%s%s_src_best_T'%(src_short, '_aug' if aug else '')))
else:
ece_tar = ece
error_tar = error
eces_tar.append(ece_tar*100.0)
## draw the best reliability diagram
if T == Ts[np.argmin(np.array(eces_tar))]:
ECE(np.concatenate(ph_list), np.concatenate(yh_list), np.concatenate(y_list),
rel_diag_fn=os.path.join(fig_root, 'rel_diag_%s%s_tar_best_T'%(src_short, '_aug' if aug else '')))
print("T = %f, error_src = %f, error_tar = %f, ECE_src = %.2f%%, ECE_tar = %.2f%%"%(
T,
error_src, error_tar,
ece_src*100.0, ece_tar*100.0))
## plot
plt.figure(1)
plt.clf()
h1 = plt.plot(Ts, eces_tar, 'r-', label='target')
h2 = plt.plot(Ts, eces_src, 'b-', label='source')
plt.plot(Ts[np.argmin(np.array(eces_tar))], min(eces_tar), 'rs')
plt.plot(Ts[np.argmin(np.array(eces_src))], min(eces_src), 'bs')
plt.xlabel('temperature', fontsize=fontsize)
plt.ylabel('ECE (%)', fontsize=fontsize)
plt.grid('on')
plt.legend(handles=[h2[0], h1[0]], fontsize=fontsize)
## save
plt.savefig(os.path.join(fig_root, 'plot_temp_trend_%s%s.png'%(src_short, '_aug' if aug else '')), bbox_inches='tight')
plt.close()
| apache-2.0 |
xubenben/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
HeraclesHX/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
except LinAlgError:
svd_restarts += 1
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
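# --- Illustrative sketch (not part of the original module) ------------------
# A minimal usage example of ``discretize``: given a spectral embedding with
# one column per cluster, it returns hard cluster labels. The helper name
# ``_discretize_example`` and the toy data are assumptions for illustration.
def _discretize_example():
    rng = np.random.RandomState(0)
    # Fake a two-cluster embedding: two well separated clouds in 2D.
    vectors = np.vstack([rng.normal(0, .05, (10, 2)) + [1, 0],
                         rng.normal(0, .05, (10, 2)) + [0, 1]])
    return discretize(vectors, random_state=rng)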
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
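# --- Illustrative sketch (not part of the original module) ------------------
# How ``spectral_clustering`` is typically driven with a symmetric k-NN
# affinity built from raw points (two concentric rings, where k-means on the
# raw coordinates would fail). The helper name and the toy data are
# assumptions for illustration only.
def _spectral_clustering_example():
    rng = np.random.RandomState(42)
    theta = rng.uniform(0, 2 * np.pi, 200)
    radius = np.where(rng.rand(200) > .5, 1.0, 3.0)
    X = np.c_[radius * np.cos(theta), radius * np.sin(theta)]
    X += rng.normal(scale=.05, size=X.shape)
    connectivity = kneighbors_graph(X, n_neighbors=10, include_self=True)
    affinity = 0.5 * (connectivity + connectivity.T)  # symmetrize
    return spectral_clustering(affinity, n_clusters=2, random_state=0)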
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
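# --- Illustrative sketch (not part of the original module) ------------------
# Using a precomputed affinity built from a distance matrix with the heat
# kernel mentioned in the class Notes above. ``distances`` is assumed to be a
# square symmetric matrix of pairwise distances; the helper name is an
# assumption for illustration.
def _precomputed_affinity_example(distances, n_clusters=2, delta=1.0):
    affinity = np.exp(-distances ** 2 / (2. * delta ** 2))
    model = SpectralClustering(n_clusters=n_clusters, affinity='precomputed',
                               random_state=0)
    return model.fit(affinity).labels_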
| bsd-3-clause |
miyyer/qb | qanta/experimental/chainer/main.py | 2 | 11389 | import os
import json
import random
import argparse
import itertools
import numpy as np
import logging
from tqdm import tqdm
import matplotlib
matplotlib.use("Agg")
import cupy
import chainer
import chainer.links as L
import chainer.functions as F
from chainer import training
from chainer.training import extensions
from chainer.dataset import concat_examples
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
answers_dir = "answers.json"
vocabulary_dir = "vocabulary.json"
train_questions_dir = "train_questions.json"
dev_questions_dir = "dev_questions.json"
class RNNModel(chainer.Chain):
def __init__(self, vocab_size, embed_size, hidden_size, output_size):
super(RNNModel, self).__init__()
with self.init_scope():
self.embed = L.EmbedID(vocab_size, embed_size)
self.rnn = L.LSTM(embed_size, hidden_size)
self.linear = L.Linear(hidden_size, output_size)
def __call__(self, xs):
"""
Forward pass of a sentence.
:param xs: a batch of sentences
:return h: final hidden states
"""
xs = self.embed(xs)
xs = F.swapaxes(xs, 0, 1) # time, batch, embed
self.rnn.reset_state()
for x in xs:
h = self.rnn(x)
h = F.tanh(self.linear(h))
return h
class DANModel(chainer.Chain):
def __init__(self, vocab_size, embed_size, hidden_size, output_size):
super(DANModel, self).__init__()
with self.init_scope():
self.embed = L.EmbedID(vocab_size, embed_size)
self.linear1 = L.Linear(embed_size, hidden_size)
self.linear2 = L.Linear(hidden_size, output_size)
def __call__(self, xs):
xs = self.embed(xs)
batch_size, length, _ = xs.shape
h = F.sum(xs, axis=1) / length
h = F.tanh(self.linear1(h))
h = F.tanh(self.linear2(h))
return h
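# --- Illustrative sketch (not part of the original file) --------------------
# A forward pass of DANModel on dummy token indices, to document the expected
# shapes: a batch of fixed-length index sequences goes in, one row of class
# scores per sentence comes out. All sizes below are made-up examples.
def _dan_forward_example():
    model = DANModel(vocab_size=100, embed_size=8, hidden_size=16, output_size=5)
    xs = np.random.randint(0, 100, size=(4, 10)).astype(np.int32)  # 4 sentences, 10 tokens each
    ys = model(xs)  # chainer Variable of shape (4, 5)
    return ys.shape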
def load_glove(glove_path, word_to_id, embed_size):
vocab_size = len(word_to_id)
embed_W = np.zeros((vocab_size, embed_size), dtype=np.float32)
with open(glove_path, "r") as fi:
logger.info("loading glove vectors..")
for line in tqdm(fi):
line = line.strip().split(" ")
word = line[0]
if word in word_to_id:
vec = np.array(line[1::], dtype=np.float32)
embed_W[word_to_id[word]] = vec
return embed_W
def converter(batch, device):
x, t = concat_examples(batch, device, 0)
return x, t
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--batch_size",
"-b",
type=int,
default=32,
help="Number of examples in each mini-batch",
)
parser.add_argument(
"--bproplen",
"-l",
type=int,
default=35,
help="Number of words in each mini-batch " "(= length of truncated BPTT)",
)
parser.add_argument(
"--epoch",
"-e",
type=int,
default=20,
help="Number of sweeps over the dataset to train",
)
parser.add_argument(
"--gpu", "-g", type=int, default=0, help="GPU ID (negative value indicates CPU)"
)
parser.add_argument(
"--gradclip",
"-c",
type=float,
default=5,
help="Gradient norm threshold to clip",
)
parser.add_argument(
"--out", "-o", default="result", help="Directory to output the result"
)
parser.add_argument(
"--resume", "-r", default="", help="Resume the training from snapshot"
)
parser.add_argument(
"--test", action="store_true", help="Use tiny datasets for quick tests"
)
parser.set_defaults(test=False)
parser.add_argument(
"--hidden_size",
type=int,
default=300,
help="Number of LSTM units in each layer",
)
parser.add_argument(
"--embed_size", type=int, default=300, help="Size of embeddings"
)
parser.add_argument(
"--model", "-m", default="model.npz", help="Model file name to serialize"
)
parser.add_argument(
"--glove",
default="data/glove.6B.300d.txt",
help="Path to glove embedding file.",
)
args = parser.parse_args()
return args
def main1():
args = parse_args()
logger.info("loading answers..")
with open(answers_dir, "r") as f:
checkpoint = json.loads(f.read())
id_to_answer = checkpoint["id_to_answer"]
answer_to_id = checkpoint["answer_to_id"]
logger.info("number of answers: {}".format(len(id_to_answer)))
logger.info("loading vocabulary..")
with open(vocabulary_dir, "r") as f:
checkpoint = json.loads(f.read())
id_to_word = checkpoint["id_to_word"]
word_to_id = checkpoint["word_to_id"]
logger.info("vocabulary size: {}".format(len(id_to_word)))
logger.info("loading questions..")
with open(train_questions_dir, "r") as f:
train_questions = json.loads(f.read())
with open(dev_questions_dir, "r") as f:
dev_questions = json.loads(f.read())
logger.info("number of training questions: {}".format(len(train_questions)))
logger.info("number of dev questions: {}".format(len(dev_questions)))
def convert_dataset(questions):
if isinstance(questions, dict):
questions = list(questions.values())
sentences = []
labels = []
for q in questions:
a = q["answer"]
a = answer_to_id[a] if isinstance(a, str) else a
for sent in q["sentences"]:
if isinstance(sent, list):
sent = np.array(sent, dtype=np.int32)
sentences.append(sent)
labels.append(a)
return list(zip(sentences, labels))
train_dataset = convert_dataset(train_questions)
dev_dataset = convert_dataset(dev_questions)
train_iter = chainer.iterators.SerialIterator(train_dataset, args.batch_size)
dev_iter = chainer.iterators.SerialIterator(
dev_dataset, args.batch_size, repeat=False
)
vocab_size = len(word_to_id)
output_size = len(answer_to_id)
model = DANModel(vocab_size, args.embed_size, args.hidden_size, output_size)
# if os.path.isfile(args.glove):
# rnn.embed.W.data = load_glove(
# args.glove, word_to_id, args.embed_size)
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
# model.predictor.embed.to_gpu()
optimizer = chainer.optimizers.SGD(lr=1.0)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.GradientClipping(args.gradclip))
iteration = 0
sum_loss = 0
sum_acc = 0
count = 0
while train_iter.epoch < args.epoch:
iteration += 1
count += 1
batch = train_iter.__next__()
x, t = converter(batch, args.gpu)
y = model(x)
loss = F.softmax_cross_entropy(y, t)
sum_loss += loss.data
sum_acc += F.accuracy(y, t).data
optimizer.target.cleargrads()
loss.backward()
optimizer.update()
if iteration % 10 == 0:
print("{}: {} {}".format(iteration, sum_loss / count, sum_acc / count))
sum_loss = 0
sum_acc = 0
count = 0
if train_iter.is_new_epoch:
print("epoch: ", train_iter.epoch)
def main():
args = parse_args()
logger.info("loading answers..")
with open(answers_dir, "r") as f:
checkpoint = json.loads(f.read())
id_to_answer = checkpoint["id_to_answer"]
answer_to_id = checkpoint["answer_to_id"]
logger.info("number of answers: {}".format(len(id_to_answer)))
logger.info("loading vocabulary..")
with open(vocabulary_dir, "r") as f:
checkpoint = json.loads(f.read())
id_to_word = checkpoint["id_to_word"]
word_to_id = checkpoint["word_to_id"]
logger.info("vocabulary size: {}".format(len(id_to_word)))
logger.info("loading questions..")
with open(train_questions_dir, "r") as f:
train_questions = json.loads(f.read())
with open(dev_questions_dir, "r") as f:
dev_questions = json.loads(f.read())
logger.info("number of training questions: {}".format(len(train_questions)))
logger.info("number of dev questions: {}".format(len(dev_questions)))
def convert_dataset(questions):
if isinstance(questions, dict):
questions = list(questions.values())
sentences = []
labels = []
for q in questions:
a = q["answer"]
a = answer_to_id[a] if isinstance(a, str) else a
for sent in q["sentences"]:
if isinstance(sent, list):
sent = np.array(sent, dtype=np.int32)
sentences.append(sent)
labels.append(a)
return list(zip(sentences, labels))
train_dataset = convert_dataset(train_questions)
dev_dataset = convert_dataset(dev_questions)
train_iter = chainer.iterators.SerialIterator(train_dataset, args.batch_size)
dev_iter = chainer.iterators.SerialIterator(
dev_dataset, args.batch_size, repeat=False
)
vocab_size = len(word_to_id)
output_size = len(answer_to_id)
rnn = DANModel(vocab_size, args.embed_size, args.hidden_size, output_size)
# if os.path.isfile(args.glove):
# rnn.embed.W.data = load_glove(
# args.glove, word_to_id, args.embed_size)
model = L.Classifier(rnn)
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
# model.predictor.embed.to_gpu()
optimizer = chainer.optimizers.SGD(lr=1.0)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.GradientClipping(args.gradclip))
updater = training.StandardUpdater(
train_iter, optimizer, converter=converter, device=args.gpu
)
trainer = training.Trainer(updater, (args.epoch, "epoch"), out=args.out)
eval_model = model.copy()
eval_rnn = eval_model.predictor
trainer.extend(
extensions.Evaluator(dev_iter, eval_model, converter=converter, device=args.gpu)
)
interval = 10 if args.test else 100
trainer.extend(extensions.LogReport(trigger=(interval, "iteration")))
trainer.extend(
extensions.PrintReport(
[
"epoch",
"main/loss",
"main/accuracy",
"validation/main/loss",
"validation/main/accuracy",
"elapsed_time",
]
),
trigger=(interval, "iteration"),
)
# trainer.extend(extensions.PlotReport([
# 'main/loss', 'validation/main/loss'],
# x_key='epoch', file_name='loss.png'))
# trainer.extend(extensions.PlotReport([
# 'main/accuracy', 'validation/main/accuracy'],
# x_key='epoch', file_name='accuracy.png'))
# trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.ProgressBar(update_interval=10))
# trainer.extend(extensions.snapshot())
# trainer.extend(extensions.snapshot_object(
# model, 'model_iter_{.updater.iteration}'))
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
chainer.serializers.save_npz(args.model, model)
if __name__ == "__main__":
main1()
| mit |
MiroK/lega | lega/legendre_basis.py | 1 | 14131 | from __future__ import division
from numpy.polynomial.legendre import leggauss, legval
from sympy import legendre, symbols, Expr, lambdify, Symbol, Number
from scipy.sparse import diags
from itertools import product
from .common import function, tensor_product
import numpy as np
def legendre_basis(N, symbol='x'):
'''Return first N Legendre polynomials as functions of symbol.'''
return [legendre(k, Symbol(symbol)) for k in range(N)]
def legendre_function(F):
'''
A linear combination of F_i and the legendre basis functions. If F is a
vector the result is a function of F. For F matrix the output is a function
of x, y.
'''
# 1d
if F.shape == (len(F), ):
basis = legendre_basis(len(F), 'x')
return function(basis, F)
# 2d
elif len(F.shape) == 2:
basis = tensor_product([legendre_basis(F.shape[0], 'x'),
legendre_basis(F.shape[1], 'y')])
# Collapse to coefs by row
F = F.flatten()
return function(basis, F)
# No 3d yet
else:
raise ValueError('For now F can be a tensor of rank at most 2.')
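# Illustrative sketch (not part of the original module): per the docstring
# above, a 1d coefficient vector yields the symbolic series
# F[0]*L_0(x) + F[1]*L_1(x) + F[2]*L_2(x). The helper name is an assumption.
def _legendre_function_example():
    F = np.array([1.0, 2.0, 3.0])
    return legendre_function(F)  # sympy expression in x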
def mass_matrix(N):
'''Mass matrix of legendre_basis(N).'''
return diags(np.array([2/(2*i+1) for i in range(N)]), 0)
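# Illustrative sketch (not part of the original module): since
# (L_i, L_i) = 2/(2*i+1) on [-1, 1], mass_matrix(3) is diag(2, 2/3, 2/5).
def _mass_matrix_example():
    return mass_matrix(3).toarray()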
def stiffness_matrix(N):
'''Stiffness matrix of legendre_basis(N).'''
# The matrix has part of the main diagonal on every second side diagonal
main_diag = np.array([sum(2*(2*k+1) for k in range(0 if i%2 else 1, i, 2))
for i in range(N)])
# Upper diagonals
offsets = range(0, N, 2)
diagonals = [main_diag[:N-offset] for offset in offsets]
# All diagonal
all_offsets = [-offset for offset in offsets[:0:-1]] + offsets
all_diagonals = [diagonal for diagonal in diagonals[:0:-1]] + diagonals
return diags(all_diagonals, all_offsets, shape=(N, N))
def backward_transformation_matrix(N):
'''
Compute NxN matrix with values N_ij = L_i(x_j) where L_i are N Legendre
polynomials and x_j are N GL quadrature points. This matrix is used for
backward Legendre transformation: Suppose function f is represented in
the wave number space by a vector F and let BL be the backward transformation
matrix. Then f(x_j) = F.BL[:, j] or f = F.BL or BL.T.F, and vector f
represents f in the real space.
'''
BL = np.zeros((N, N))
# Get points of the quadrature
points, _ = leggauss(N)
for i in range(N):
c = np.zeros(i+1)
c[-1] = 1
# Evaluate the i-th polynomial at all the points
row = legval(points, c)
BL[i, :] = row
return BL
class BackwardLegendreTransformation(object):
'''
Perform backward Legendre transformations. The transformation matrix
is computed only once.
'''
def __init__(self, N):
'''Cache the matrices.'''
if not isinstance(N, list):
N = [N]
assert len(N) < 3
self.__BL = [backward_transformation_matrix(n) for n in N]
def __call__(self, F):
'''Transform f from wave number space to physical space.'''
if len(self.__BL) == 1:
return (self.__BL[0].T).dot(F)
else:
return self.__BL[0].T.dot(F.dot(self.__BL[1]))
def asarray(self):
'''
Return the transformation matrix. Matrix dotted with the representation
in wave number space yields the representation in physical space.
'''
if len(self.__BL) == 1:
return self.__BL[0]
else:
return np.kron(self.__BL[0].T, self.__BL[1].T)
def forward_transformation_matrix(N):
'''
For any function f, we define its interpolant f_N as \sum_{i=0}^{N-1}F_i*L_i,
where L_i is the i-th Legendre polynomial and the coefficients F_i are given
as F_i = \sum_{j=0}^{N-1} f(x_j)*w_j*L_i(x_j)/(L_i, L_i). The interpolant is
thus a polynomial of degree N-1. The reasoning behind the definition is that
if f were a polynomial of degree N-1, the integrals (f, L_i), having an
integrand of max degree 2N-2, would be evaluated exactly by the N-point GL
quadrature. Vector F is a representation of function f in the wave number
space. Computing F can be represented as a matrix-vector product and is
referred to as a forward Legendre transformation. Here we build the matrix
FL for that operation.
'''
# Note that each row of FL could be computed by taking the dot of a row of
# matrix BL.inv(M) with the weight vector.
FL = np.zeros((N, N))
# Get points and weights of the quadrature
points, weights = leggauss(N)
for i in range(N):
c = np.zeros(i+1)
c[-1] = 1
# Evaluate the i-th polynomial at all the points
row = legval(points, c)
# Now multiply element-wise with the weights, i.e. dot with the weight vector
row *= weights
# Finally the (Li, Li) term, i.e. the inv(M)
row /= 2/(2*i+1)
FL[i, :] = row
return FL
class GLNodeEvaluation(object):
'''
Evaluate f at nodes of N point GL quadrature. The points are precomputed
and stored.
'''
def __init__(self, N):
'''Compute the evaluation points.'''
if not isinstance(N, list):
N = [N]
self.dim = len(N)
# This would work for any dim, but since only 1d and 2d are supported in
# FLT and BLT, there is no point in supporting more here.
assert self.dim < 3
self.shape = tuple(N)
# Get points for components
points_i = [leggauss(n)[0] for n in N]
# Combine as cartesian product
self.points = np.array([list(pis) for pis in product(*points_i)])
def __call__(self, f):
'''Evaluate f at points.'''
# In general the input should be some sort of (lambda) function
# Sympy functions are lambdified for fast numpy evaluation
dim = self.dim
points = self.points
xyz = symbols('x, y, z')
# Constant functions
if isinstance(f, (Number, int, float)):
f_values = float(f)*np.ones(len(points))
# Symbolic functions
elif isinstance(f, Expr):
# Symbolic function must have the dim-th var present
# FIXME: Relax this?
assert xyz[dim-1] in f.atoms() or isinstance(f, Number)
f = lambdify(xyz[:dim], f, 'numpy')
# Lambdify makes it fast if we feed as arrays the x y z comps
# of points
f_values = f(*[points[:, i] for i in range(dim)])
# Python functions/lambdas
else:
# For (lambda)function I can only check the argcount
assert f.func_code.co_argcount == dim
f_values = np.array([f(*(p.tolist())) for p in points])
return f_values.reshape(self.shape)
class ForwardLegendreTransformation(object):
'''
Perform forward Legendre transformations. The transformation matrices
are computed only once and so are the nodes for evaluation.
'''
def __init__(self, N):
'''Cache the matrices.'''
if not isinstance(N, list):
N = [N]
assert len(N) < 3
self.__FL = [forward_transformation_matrix(n) for n in N]
# Make your own evaluator
self.__GLeval = GLNodeEvaluation(N)
def __call__(self, f):
'''Transform f to wave number space space.'''
F = self.__GLeval(f)
if len(self.__FL) == 1:
return self.__FL[0].dot(F)
else:
return self.__FL[0].dot(F.dot(self.__FL[1].T))
def asarray(self):
'''
Return the transformation matrix. For some f, the matrix dotted with
f evaluated at the GL nodal points yields the representation in the wave
number space.
'''
if len(self.__FL) == 1:
return self.__FL[0]
else:
return np.kron(self.__FL[0], self.__FL[1])
# -----------------------------------------------------------------------------
if __name__ == '__main__':
from sympy import simplify, sin, cos, pi, Symbol, S
from sympy.mpmath import quad
from sympy.plotting import plot
import matplotlib.pyplot as plt
from math import sqrt
test_1d = False
test_2d = True
if test_1d:
x = Symbol('x')
# Check the stiffness matrix
N = 16
basis = legendre_basis(N)
A = np.zeros((N, N))
for i, v in enumerate(basis):
integrand = v.diff(x, 1)**2
A[i, i] = quad(lambdify(x, integrand), [-1, 1])
for j, u in enumerate(basis[i+1:], i+1):
integrand = u.diff(x, 1)*v.diff(x, 1)
A[i, j] = quad(lambdify(x, integrand), [-1, 1])
A[j, i] = A[i, j]
A_ = stiffness_matrix(N).toarray()
assert np.all(abs(A - A_) < 1E-15)
# Check how NodeEval handles constants
f = 1
f_values = GLNodeEvaluation(4)(f)
assert np.allclose(f_values, np.ones_like(f_values))
g = S(2)
g_values = GLNodeEvaluation(4)(g)
assert np.allclose(g_values, 2*np.ones_like(g_values))
# First a polynomial should be interpolated/projected/FL-transform exactly
N = 8
f = x**7 - 4*x**5 + 1
f_ = lambdify(x, f, 'numpy')
F = ForwardLegendreTransformation(N)(f)
f_N = legendre_function(F)
e = simplify(f-f_N)
assert abs(sqrt(quad(lambdify(x, e**2), [-1, 1]))) < 1E-13
# And use f_N(x_j) = f(x_j) to test BL
f_N_values = BackwardLegendreTransformation(N)(F)
f_values = GLNodeEvaluation(N)(f)
# Allow some room for inexact numerics
assert np.all(np.abs(f_N_values - f_values) < 1E-10)
# Now take some `wilder` function and see how the interpolation quality
# improves
f = sin(x)*cos(pi*x**2)
f_ = lambdify(x, f, 'numpy')
tol = 1E-13
converged = False
N = 1
N_max = 100
# If you had expansion as F_ coeffs for a function then its L^2 norm could
# be computed via the mass matrix as sqrt(F_.M.F_)
# Take the largest space
F_ = ForwardLegendreTransformation(N_max)(f_)
Ns, errors = [], []
while not converged:
F = ForwardLegendreTransformation(N)(f_)
f_N = legendre_function(F)
e = simplify(f-f_N)
# Evaluate the L2 error by mpmath.quad which is adaptive and almost
# exact
error = sqrt(quad(lambdify(x, e**2), [-1, 1]))
# We compute the L2 error by the mass matrix taking f in the same space
# as F_ so this is not exact, but it's interesting, right? :)
e_ = F - F_[:N]
M = mass_matrix(N)
error_ = sqrt(e_.dot(M.dot(e_)))
print('N={:d} L2={:.4E} (mass)L2={:.4E}'.format(N, error, error_))
Ns.append(N)
errors.append(error)
converged = error < tol or N >= N_max
N += 1
# See how the final interpolant compares to the function
pf = plot(f, (x, -1, 1), show=False)
pf_ = plot(f_N, (x, -1, 1), show=False)
pf_[0].line_color='red'
pf.append(pf_[0])
pf.show()
# Plot convergence history
plt.figure()
plt.loglog(Ns, errors)
plt.show()
if test_2d:
x, y = symbols('x, y')
# First a polynomial should be interpolated/projected/FL-transform exactly
N, M = 3, 4
f = x**2 + y**3
f_ = lambdify([x, y], f, 'numpy')
F = ForwardLegendreTransformation([N, M])(f)
f_NM = legendre_function(F)
e = simplify(f-f_NM)
assert abs(sqrt(quad(lambdify([x, y], e**2), [-1, 1], [-1, 1]))) < 1E-13
# Represent FLT as object acting on vector
F_nodes = GLNodeEvaluation([N, M])(f_).flatten()
F_mat = ForwardLegendreTransformation([N, M]).asarray()
F_ = F_mat.dot(F_nodes)
assert np.all((F.flatten() - F_) < 1E-14)
# What is the error with the mass matrix?
mass_N = mass_matrix(N)
mass_M = mass_matrix(M)
E = ForwardLegendreTransformation([N, M])(e)
assert abs(sqrt(np.trace((mass_N.dot(E)).dot(mass_M.dot(E.T))))) < 1E-13
# And use f_N(x_j) = f(x_j) to test BL
f_NM_values = BackwardLegendreTransformation([N, M])(F)
f_values = GLNodeEvaluation([N, M])(f)
# Allow some room for inexact numerics
assert np.all(np.abs(f_NM_values - f_values) < 1E-10)
# Represent BLT as object acting on vector
F_mat = BackwardLegendreTransformation([N, M]).asarray()
assert np.all((f_NM_values.flatten() - F_mat.dot(F.flatten())) < 1E-14)
# Now take some `wilder` function and see how the interpolation quality
# improves
f = sin(x)*cos(2*pi*y)
f_ = lambdify([x, y], f, 'numpy')
tol = 1E-13
converged = False
N = 2
N_max = 100
# If you had expansion as F_ coeffs for a function then its L^2 norm could
# be computed via the mass matrix as sqrt(F_.M.F_)
# Take the largest space
F_ = ForwardLegendreTransformation([N_max, N_max])(f_)
Ns, errors = [], []
while not converged:
F = ForwardLegendreTransformation([N, N])(f_)
# We compute the L2 error by the mass matrix taking f in the same space
# as F_ so this is not exact
E = F - F_[:N, :N]
M = mass_matrix(N)
error = sqrt(np.trace((M.dot(E)).dot(M.dot(E.T))))
# Alternatively and equivalently
# error = sqrt(((M.dot(E))*(M.dot(E.T)).T).sum())
print("N={:%d} (mass)L2={:%.4E}".format(N, error))
Ns.append(N)
errors.append(error)
converged = error < tol or N >= N_max
N += 1
# See how the final interpolant compares to the function
# from sympy.plotting import plot3d
# f_N = legendre_function(F)
# pf = plot3d(f-f_N, (x, -1, 1), (y, -1, 1))
# pf.show()
| mit |
ProgramFan/bentoo | bentoo/tools/collector.py | 1 | 45647 | # coding: utf-8
#
'''Collector - Test results collector
Collector scans a test project directory, parses all result files found and
saves all parsed results as a self-described data sheet in a file. One can then
use Analyser or other tools to investigate the resultant data sheet.
To use collector, simply follow the argument documentation. To use the generated
data sheet, keep in mind that the data sheet resembles a relational database table
and the concrete format is backend specific. For the sqlite3 backend, the result is
stored in a table named 'result'. The data sheet is designed to be easily
parsable by pandas, so the recommendation is to use pandas to investigate the
data.
'''
from __future__ import print_function, unicode_literals
import argparse
import csv
import fnmatch
import glob
import io
import json
import os
import re
import sqlite3
import string
import sys
import tarfile
from collections import OrderedDict
from functools import reduce
from bentoo.common.project import TestProjectReader
#
# Design Of Collector
#
# Collector consists of ResultScanner, DataParser, DataAggregator and
# StorageBackend. ResultScanner searches the test project for resultant data
# files, and generates a list of file paths (absolute path). DataParser parses
# each data file to generate a list of data tables. Each file shall contain the
# same number of data tables, and all data tables shall have the same shape.
# DataAggregator merges all or selected data tables into a large data table,
# dropping table columns if required. The StorageBackend is then responsible
# for storing the resultant data table.
#
# The ResultScanner, DataParser and StorageBackend are implemented using
# duck-typing to support multiple types of parsers and backends.
#
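#
# Minimal sketch (an assumption, not part of the original file) of the
# duck-typed DataParser contract described above: any class providing
# ``register_cmd_args``, ``retrive_cmd_args`` and ``itertables`` can be
# plugged in, exactly like the concrete parsers defined later in this module.
class _NullDataParser(object):
    @staticmethod
    def register_cmd_args(argparser):
        pass

    @staticmethod
    def retrive_cmd_args(namespace):
        return {}

    def __init__(self, use_table, args):
        self.use_table = use_table

    def itertables(self, fn):
        # A real parser yields dicts with "table_id", "column_names",
        # "column_types" and "data" keys, one per parsed table.
        return iter(())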
#
# ResultScanner
#
class FnmatchFilter(object):
def __init__(self, patterns, mode="include"):
assert (mode in ("include", "exclude"))
self.patterns = patterns
def match_any(path, patterns):
for m in patterns:
if fnmatch.fnmatch(path, m):
return True
return False
def null_check(path):
return True
def include_check(path):
return True if match_any(path, self.patterns) else False
def exclude_check(path):
return False if match_any(path, self.patterns) else True
if not patterns:
self.checker = null_check
elif mode == "include":
self.checker = include_check
else:
self.checker = exclude_check
def valid(self, input):
return self.checker(input)
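# Illustrative sketch (not part of the original module): FnmatchFilter keeps
# or drops case ids by wildcard patterns, depending on the mode. The helper
# name and the sample case ids are assumptions.
def _fnmatch_filter_example():
    keep = FnmatchFilter(["cg/*"], mode="include")
    drop = FnmatchFilter(["*/debug"], mode="exclude")
    return keep.valid("cg/np-16"), drop.valid("cg/debug")  # (True, False)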
class ResultScanner(object):
def __init__(self,
project_root,
case_filter=None,
filter_mode="include",
result_selector=None):
'''
Parameters
----------
project_root: string, root dir of the test project
case_filter: list of strings, wildcard strings to match cases
filter_mode: "include" or "exclude", how the filter is handled
result_selector: list of integers, index of selected results
'''
self.project = TestProjectReader(project_root)
self.case_filter = FnmatchFilter(case_filter, filter_mode)
self.result_selector = result_selector
def iterfiles(self, with_stdout=False):
for case in self.project.itercases():
if not self.case_filter.valid(case["id"]):
continue
fullpath = case["fullpath"]
result_files = list(case["spec"]["results"])
if with_stdout and "STDOUT" not in result_files:
result_files.append("STDOUT")
result_selector = self.result_selector
if not result_selector:
result_selector = range(len(result_files))
for result_id in result_selector:
fn = os.path.join(fullpath, result_files[result_id])
short_fn = os.path.relpath(fn, self.project.project_root)
if not os.path.exists(fn):
print("WARNING: Result file '%s' not found" % short_fn)
continue
spec = list(
zip(self.project.test_factors + ["result_id"],
case["test_vector"] + [result_id]))
spec = OrderedDict(spec)
yield {"spec": spec, "fullpath": fn, "short_fn": short_fn}
#
# DataParser
#
def parse_jasminlog(fn, use_table=None):
'''parse_jasminlog - jasmin time manager log parser
This function parses jasmin timer manager performance reports. It
splits detected jasmin timer logs into several measures and assigns
them unique names.
Assume the following log::
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
TOTAL
WALLCLOCK TIME
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Timer Name Proc: 0 Max
alg::NumericalIntegratorComponent::computing() 3.55 (100%) 7.3(90%)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Examples
--------
The use of this function is fairly straight forward:
>>> info = parse_jasminlog("demo_jasminlog.txt")
>>> info[-1]["data"][0]["timer_name"]
TOTAL_RUN_TIME
>>> info[-1]["data"][0]["summed"]
5.14626
'''
def tokenlize(s):
'''Convert string into a valid lower case pythonic token'''
invalid_chars = r"[:-+*/#\n]"
return "_".join(
map(lambda x: x.lower(),
re.sub(invalid_chars, " ", s).split()))
avail_types = {
"TimerName": str,
"proc_0": float,
"proc_0_percent": float,
"summed": float,
"summed_percent": float,
"max": float,
"max_percent": float,
"proc": int
}
table_id = 0
content = open(fn, "r").read()
logtbl_ptn = re.compile(
r"^\+{80}$(?P<name>.*?)^\+{80}$" + ".*?(?P<header>^.*?$)" +
r"(?P<content>.*?)^\+{80}$", re.M + re.S)
for match in logtbl_ptn.finditer(content):
log_table = match.groupdict()
table_name = tokenlize(log_table["name"])
# We only handle table "TOTAL WALLCLOCK TIME"
if table_name != "total_wallclock_time":
continue
# skipping tables not wanted, but null use_table means use all tables
if use_table and table_id not in use_table:
continue
# Extract table header
header_ptn = re.compile(r"(Timer Name|Proc: \d+|Summed|Proc|Max)")
header = list(map(tokenlize, header_ptn.findall(log_table["header"])))
assert (header[0] == "timer_name")
header[0] = "TimerName"
# Parse table rows
table_contents = []
for ln in log_table["content"].strip().split("\n"):
rec_ptn = re.compile(r"^\s*(TOTAL RUN TIME:|\S+)\s*(.*)$")
tl, tr = rec_ptn.search(ln).groups()
timer_name = tl.strip()
if timer_name == "TOTAL RUN TIME:":
timer_name = "TOTAL_RUN_TIME"
timer_rec = {"TimerName": avail_types["TimerName"](timer_name)}
flt_ptn = r"[-+]?(?:\d+(?:\.\d*)?)(?:[Ee][-+]?\d+)?"
seg_ptn = re.compile(r"({0})\s*(\({0}%\))?".format(flt_ptn))
for i, seg in enumerate(seg_ptn.finditer(tr)):
# Example: 99.9938 (97%)
a, b = seg.groups()
cn = header[i + 1]
timer_rec[cn] = avail_types[cn](a)
if b:
pn = "{0}_percent".format(cn)
timer_rec[pn] = avail_types[pn](b[1:-2]) * 0.01
table_contents.append(timer_rec)
# Fix table header when there are XX% records
for k in table_contents[0].keys():
if k not in header:
header.append(k)
# Make final result:
# Ensures `len(header) == len(types)` and `[len(data_item) ==
# len(header) for data_item in data]`. So the data is in good shape.
types = [avail_types[x] for x in header]
data = [[v.get(k, None) for k in header] for v in table_contents]
table = {
"table_id": table_id,
"column_names": header,
"column_types": types,
"data": data
}
yield table
table_id += 1
class JasminParser(object):
@staticmethod
def register_cmd_args(argparser):
pass
@staticmethod
def retrive_cmd_args(namespace):
return {}
def __init__(self, use_table, args):
self.use_table = use_table
def itertables(self, fn):
tables = [t for t in parse_jasminlog(fn)]
if not tables:
yield
return
if self.use_table:
for i in self.use_table:
yield tables[i]
else:
for t in tables:
yield t
def parse_jasmin4log(fn, use_table=None):
'''parse_jasmin4log - jasmin 4.0 time manager log parser
This function parses jasmin 4.0 timer manager performance reports. It
splits detected jasmin timer logs into several measures and assigns
them unique names.
Assume the following log::
*************************** TIME STATISTICS ****************************
-------------------------------------------------------------------------
Name MaxTime
-------------------------------------------------------------------------
TOTAL RUN TIME 0.9065(100.00%)
algs::SimpleHierarchyTimeIntegrator::advanceHierarchy() 0.8624(95.14%)
-------------------------------------------------------------------------
Examples
--------
The use of this function is fairly straight forward:
>>> info = parse_jasmin4log("demo_jasmin4log.txt")
>>> info[-1]["data"][0]["Name"]
TOTAL RUN TIME
>>> info[-1]["data"][0]["Accesses"]
1
'''
avail_types = {
"TimerName": str,
"MaxTime": float,
"MaxTime_percent": float,
"AvgTime": float,
"AvgTime_percent": float,
"LoadBalance": float,
"Accesses": int,
"Overhead": float,
"LocalMaxTime": float,
"LocalAvgTime": float,
"MaxLoc": int,
"LocalMaxLoc": int,
}
table_id = 0
content = open(fn, "r").read()
logtbl_ptn = re.compile(
r"^\*+ (?P<name>.*?) \*+$\n-{10,}\n" +
r"^(?P<header>^.*?$)\n-{10,}\n" + r"(?P<content>.*?)^-{10,}\n",
re.M + re.S)
for match in logtbl_ptn.finditer(content):
# skipping tables not wanted, but null use_table means use all tables
if use_table and table_id not in use_table:
continue
# TODO: use column width to better split columns. the columns names and
# width can be determined from the header, everything is right aligned.
log_table = match.groupdict()
# Extract table header
header = log_table["header"].split()
assert (header[0] == "Name")
header[0] = "TimerName"
timer_name_pos = log_table["header"].index("Name")
timer_value_pos = timer_name_pos + len("Name")
# Parse table rows
table_contents = []
for ln in log_table["content"].split("\n"):
# skip empty lines
if not ln.strip():
continue
timer_rec = {}
# split out the timer name column first, it may contain strange
# charactors such as spaces.
timer_name = ln[:timer_value_pos]
timer_rec["TimerName"] = timer_name.strip()
timer_values = ln[timer_value_pos:]
seg_ptn = re.compile(r"(\S+)")
for i, seg in enumerate(seg_ptn.finditer(timer_values)):
cn = header[i + 1]
val = seg.group(1)
flt_ptn = r"[-+]?(?:\d+(?:\.\d*)?)(?:[Ee][-+]?\d+)?|[+-]?nan"
m = re.match(r"({0})\(({0})%\)".format(flt_ptn), val)
if m:
pn = "{0}_percent".format(cn)
a, b = list(map(float, [m.group(1), m.group(2)]))
b = b * 0.01
timer_rec[cn], timer_rec[pn] = a, b
continue
m = re.match(r"({0})%".format(flt_ptn), val)
if m:
timer_rec[cn] = float(m.group(1)) * 0.01
continue
timer_rec[cn] = avail_types[cn](val)
table_contents.append(timer_rec)
# Fix table header when there are XX% records
for k in table_contents[0].keys():
if k not in header:
header.append(k)
# Make final result
# Ensures `len(header) == len(types)` and `[len(data_item) ==
# len(header) for data_item in data]`. So the data is in good shape.
types = [avail_types.get(x, str) for x in header]
data = [[v.get(k, None) for k in header] for v in table_contents]
table = {
"table_id": table_id,
"column_names": header,
"column_types": types,
"data": data
}
yield table
table_id += 1
class Jasmin4Parser(object):
@staticmethod
def register_cmd_args(argparser):
pass
@staticmethod
def retrive_cmd_args(namespace):
return {}
def __init__(self, use_table, args):
self.use_table = use_table
def itertables(self, fn):
tables = [t for t in parse_jasmin4log(fn)]
if not tables:
yield
return
if self.use_table:
for i in self.use_table:
yield tables[i]
else:
for t in tables:
yield t
class UnifiedJasminParser(object):
@staticmethod
def register_cmd_args(argparser):
pass
@staticmethod
def retrive_cmd_args(namespace):
return {}
def __init__(self, use_table, args):
self.use_table = use_table
jasmin4_ptn = re.compile(
r"^\*+ (?P<name>.*?) \*+$\n-{10,}\n" +
r"^(?P<header>^.*?$)\n-{10,}\n" + r"(?P<content>.*?)^-{10,}\n",
re.M + re.S)
jasmin3_ptn = re.compile(
r"^\+{80}$(?P<name>.*?)^\+{80}$" + ".*?(?P<header>^.*?$)" +
r"(?P<content>.*?)^\+{80}$", re.M + re.S)
def detector(fn):
content = open(fn).read()
if jasmin3_ptn.search(content):
return "jasmin3"
elif jasmin4_ptn.search(content):
return "jasmin4"
else:
return "null"
def null_parse(fn, use_table):
# return + yield makes a perfect empty generator function
return
yield
self.filetype_detector = detector
self.parser_funcs = {
"jasmin3": parse_jasminlog,
"jasmin4": parse_jasmin4log,
"null": null_parse
}
def itertables(self, fn):
filetype = self.filetype_detector(fn)
tables = [t for t in self.parser_funcs[filetype](fn)]
if not tables:
yield
return
if self.use_table:
for i in self.use_table:
yield tables[i]
else:
for t in tables:
yield t
#
# Likwid Parser
#
class BlockReader(object):
def __init__(self, start, end, use_regex=False):
if use_regex:
self.start_ = re.compile(start)
def match_start(x):
return self.start_.match(x)
self.match_start = match_start
self.end_ = re.compile(end)
def match_end(x):
return self.end_.match(x)
self.match_end = match_end
else:
def match_start(x):
return x == self.start_
def match_end(x):
return x == self.end_
self.start_ = start
self.end_ = end
self.match_start = match_start
self.match_end = match_end
def iterblocks(self, iterable):
while True:
try:
block = []
while not self.match_start(next(iterable)):
continue
line = next(iterable)
while not self.match_end(line):
block.append(line)
line = next(iterable)
yield block
except StopIteration:
return
def findblock(self, iterable):
block = []
while not self.match_start(next(iterable)):
continue
line = next(iterable)
while not self.match_end(line):
block.append(line)
line = next(iterable)
return block
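# A minimal BlockReader usage sketch (the marker strings below are
# illustrative only):
#
#   lines = iter(["noise\n", "@start\n", "a\n", "b\n", "@end\n"])
#   for block in BlockReader("@start\n", "@end\n").iterblocks(lines):
#       print(block)  # -> ['a\n', 'b\n']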
class LikwidBlockParser(object):
def __init__(self):
self.column_names = []
self.column_types = []
self.data = []
def clear(self):
self.column_names = []
self.column_types = []
self.data = []
def process(self, iterable):
self.clear()
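# The first line of the block is discarded; the second line carries the
# CPU clock (the value after the last ':'), whose inverse is emitted
# later as the 'inverseClock' column.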
next(iterable)
line2 = next(iterable)
cpu_cycles = float(line2.split(":")[-1].strip())
other = [x for x in iterable]
other = io.StringIO("".join(other[1:-1]))
likwid_data = csv.DictReader(other)
# NOTE: likwid output uses RegionTag instead of TimerName and a
# different column order. We fix both here.
start_columns = "ThreadId,TimerName,time,CallCount,inverseClock".split(
",")
self.column_names.extend(start_columns)
self.column_types.extend([int, str, float, int, float])
other_columns = list(likwid_data.fieldnames[4:])
other_columns = [x for x in other_columns if x]
self.column_names.extend(other_columns)
self.column_types.extend([float] * len(other_columns))
self.data = []
for record in likwid_data:
result = [
record["ThreadId"], record["RegionTag"], record["RDTSC"],
record["CallCount"]
]
result.append(1.0 / cpu_cycles)
result.extend(record[f] for f in other_columns)
self.data.append(result)
class LikwidParser(object):
@staticmethod
def register_cmd_args(argparser):
pass
@staticmethod
def retrive_cmd_args(namespace):
return {}
def __init__(self, use_table, args):
self.args = args
self.use_table = use_table
def itertables(self, fn):
# We only accept fake file "LIKWID_${GROUP}", so check it.
filename = os.path.basename(fn)
if not filename.startswith("LIKWID_"):
raise RuntimeError(
"Invalid data file '%s', shall be 'LIKWID_${GROUP}'" % fn)
# Search for the real likwid data files, which match the pattern
# 'likwid_counters.\d+.dat'
result_dir = os.path.dirname(fn)
likwid_data = glob.glob(
os.path.join(result_dir, "likwid_counters.*.dat"))
if not likwid_data:
print("WARNING: No likwid data file found in '%s'" % result_dir)
return
files = [open(path) for path in likwid_data]
parser = LikwidBlockParser()
likwid_block = BlockReader("@start_likwid\n", "@end_likwid\n")
# Count blocks to ease table_id generation
nblocks = len(list(likwid_block.iterblocks(files[0])))
if nblocks == 0:
print("WARNING: No likwid data table found in '%s'" %
likwid_data[0])
return
# Reset files[0] since iterblocks has already reached EOF.
files[0] = open(likwid_data[0])
all_tables = []
for table_id in range(nblocks):
data = []
for i, f in enumerate(files):
proc_id = int(os.path.basename(likwid_data[i]).split(".")[1])
block = likwid_block.findblock(f)
assert (block)
parser.process(iter(block))
for d in parser.data:
data.append([proc_id] + d)
cn = ["ProcId"] + parser.column_names
ct = [int] + parser.column_types
all_tables.append({
"table_id": table_id,
"column_names": cn,
"column_types": ct,
"data": data
})
if not all_tables:
yield
return
if self.use_table:
for i in self.use_table:
yield all_tables[i]
else:
for t in all_tables:
yield t
class UdcBlockParser(object):
def __init__(self):
self.column_names = []
self.column_types = []
self.data = []
def clear(self):
self.column_names = []
self.column_types = []
self.data = []
def process(self, iterable):
self.clear()
content = [x for x in iterable]
content = io.StringIO("".join(content))
data = csv.DictReader(content)
start_columns = "ThreadId,TimerName".split(",")
self.column_names.extend(start_columns)
self.column_types.extend([int, str])
other_columns = list(data.fieldnames[2:])
other_columns = [x for x in other_columns if x]
self.column_names.extend(other_columns)
self.column_types.extend([float] * len(other_columns))
self.data = []
for record in data:
result = [record["ThreadId"], record["TimerName"]]
result.extend(record[f] for f in other_columns)
self.data.append(result)
class UdcParser(object):
@staticmethod
def register_cmd_args(argparser):
pass
@staticmethod
def retrive_cmd_args(namespace):
return {}
def __init__(self, use_table, args):
self.args = args
self.use_table = use_table
def itertables(self, fn):
# We only accept the placeholder file "USER_DEFINED_COUNTERS", so check it.
filename = os.path.basename(fn)
if filename != "USER_DEFINED_COUNTERS":
raise ValueError(
"Invalid data file '%s', shall be 'USER_DEFINED_COUNTERS'" %
fn)
# Search for the real data files, which match the pattern
# 'user_defined_counters.\d+.dat'
result_dir = os.path.dirname(fn)
udc_data = glob.glob(
os.path.join(result_dir, "user_defined_counters.*.dat"))
if not udc_data:
print("WARNING: No data file found in '%s'" % result_dir)
return
files = [open(path) for path in udc_data]
parser = UdcBlockParser()
block_reader = BlockReader("@start_udc\n", "@end_udc\n")
# Count blocks to ease table_id generation
nblocks = len(list(block_reader.iterblocks(files[0])))
if nblocks == 0:
print("WARNING: No udc data table found in '%s'" % udc_data[0])
return
# Reset files[0] since iterblocks has already reached EOF.
files[0] = open(udc_data[0])
all_tables = []
for table_id in range(nblocks):
data = []
for i, f in enumerate(files):
proc_id = int(os.path.basename(udc_data[i]).split(".")[1])
block = block_reader.findblock(f)
assert (block)
parser.process(iter(block))
for d in parser.data:
data.append([proc_id] + d)
cn = ["ProcId"] + parser.column_names
ct = [int] + parser.column_types
all_tables.append({
"table_id": table_id,
"column_names": cn,
"column_types": ct,
"data": data
})
if not all_tables:
yield
return
if self.use_table:
for i in self.use_table:
yield all_tables[i]
else:
for t in all_tables:
yield t
def identifier(val):
'''Convert a string to a valid c identifier'''
a = re.sub(r"\W", "_", str(val).strip().lower())
return re.sub(r"_+", "_", a.strip("_"))
class YamlParser(object):
'''Yaml table parser.
This parser parses yaml document blocks in a file and converts them to a list
of data tables. Each yaml block starts with a line `---` and ends with
another line of either `---` or `...`, and its content must be a dict or a
list of dicts. For example:
The performance results: (time in usecs):
---
time: 13.3
float_ops: 112343449
mem_reads: 11334349399
mem_writes: 33449934
---
'''
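# Illustrative: the yaml block in the docstring above yields one table whose
# column_names are the block's keys (normalized via identifier()) and whose
# single data row holds the corresponding values; a list of dicts yields one
# row per dict.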
@staticmethod
def register_cmd_args(argparser):
pass
@staticmethod
def retrive_cmd_args(namespace):
return {}
def __init__(self, use_table, args):
self.args = args
self.use_table = use_table
def itertables(self, fn):
import bentoo.yaml as yaml
all_tables = []
yamldoc_regex = re.compile(
r"(?:^|\n)\s*---\s*\n(.+?)\n\s*(---|...)\s*\n", re.M + re.S)
for i, match in enumerate(yamldoc_regex.finditer(open(fn).read())):
content = yaml.safe_load(match.group(1))
if isinstance(content, dict):
# a single dict
cn = list(map(identifier, list(content.keys())))
vals = list(content.values())
ct = [type(x) for x in vals]
data = [vals]
elif isinstance(content, list):
# a list of dicts
assert content
cn = list(map(identifier, list(content[0].keys())))
ct = [type(x) for x in content[0].values()]
data = []
for item in content:
assert set(cn) == set(item.keys())
val = [item[x] for x in cn]
data.append(val)
else:
raise RuntimeError(
"Unsupported yaml table: {}".format(content))
all_tables.append({
"table_id": i,
"column_names": cn,
"column_types": ct,
"data": data
})
if not all_tables:
yield
return
if self.use_table:
for i in self.use_table:
yield all_tables[i]
else:
for t in all_tables:
yield t
def guess_type(data):
'''Guess the best column types for a string table'''
type_hierarchy = [int, float, str] # start from int, str for all
def promote_type(val, type_index):
for i in range(type_index, len(type_hierarchy)):
t = type_hierarchy[i]
try:
t(val)
return i
except ValueError:
continue
# for each column of each row, we promote the type to be able to represent
# all values accurately.
curr_types = [0] * len(data[0])
for row in data:
for i, val in enumerate(row):
curr_types[i] = promote_type(val, curr_types[i])
return [type_hierarchy[i] for i in curr_types]
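# For example (illustrative):
#   guess_type([["1", "2.5", "x"], ["3", "4", "y"]]) == [int, float, str]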
class PipetableParser(object):
'''Pipetable table parser.
This parser parses markdown's pipe tables in a file and converts them to a
list of data tables.
The performance results: (time in usecs):
| time | float_ops | mem_reads | mem_writes |
|------|-----------|-----------|------------|
| 13.3 | 334e5 | 3334456555| 334343434 |
'''
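# Illustrative: the pipe table above parses into one table with column_names
# ['time', 'float_ops', 'mem_reads', 'mem_writes'] and column types guessed by
# guess_type() (here float, float, int, int).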
@staticmethod
def register_cmd_args(argparser):
pass
@staticmethod
def retrive_cmd_args(namespace):
return {}
def __init__(self, use_table, args):
self.args = args
self.use_table = use_table
def itertables(self, fn):
all_tables = []
# We use a loose regex for pipe tables: each table row shall begin and
# end with `|` and be separated from other content by new lines.
# Note we match `\n` explicitly since we restrict each table row to one
# line.
table_regex = re.compile(r"(?:^|\n)" + r"(\s*\|.+\|\s*\n)" +
r"\s*\|[-:| ]+\|\s*\n" +
r"((?:\s*\|.+\|\s*\n)+)" + r"(?:\n|$)")
for i, match in enumerate(table_regex.finditer(open(fn).read())):
parse_row = lambda x: [
y.strip() for y in x.strip().strip('|').split('|')
]
header = match.group(1)
content = match.group(2)
header = parse_row(header)
data = []
for line in content.split('\n'):
if line.strip():
row = parse_row(line.strip())
data.append(row)
column_types = guess_type(data)
real_data = []
for row in data:
real_data.append(
[column_types[i](v) for i, v in enumerate(row)])
all_tables.append({
"table_id": i,
"column_names": header,
"column_types": column_types,
"data": real_data
})
if not all_tables:
yield
return
if self.use_table:
for i in self.use_table:
yield all_tables[i]
else:
for t in all_tables:
yield t
class DsvParser(object):
'''Delimiter-separated values table parser.
This parser parses dsv tables in a file and converts them to a list of data
tables. Each table is separated from surrounding text by lines matching
`|={3,}`, and the default delimiter is `,` (i.e. csv).
The performance results: (time in usecs):
|===
time, float_ops, mem_reads, mem_writes
13.3, 334e5, 3334456555, 334343434
|===
'''
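# Illustrative: the csv-style table above parses the same way as the pipe
# table, with the header taken from the first row and column types guessed by
# guess_type().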
@staticmethod
def register_cmd_args(argparser):
argparser.add_argument(
"--dsv-seperator",
metavar="CHAR",
dest="dsv_sep",
default=",",
help="regex seperator for dsv values (default: ',')")
@staticmethod
def retrive_cmd_args(namespace):
return {"sep": namespace.csv_sep}
def __init__(self, use_table=[], args={'sep': ','}):
self.args = args
self.use_table = use_table
def itertables(self, fn):
all_tables = []
table_regex = re.compile(r"\s*\|={3,}\s*\n" + r"((?:[^|]+\n){2,})" +
r"\s*\|={3,}\n")
for i, match in enumerate(table_regex.finditer(open(fn).read())):
parse_row = lambda x: [
y.strip() for y in re.split(self.args['sep'], x.strip())
]
content = match.group(1).split('\n')
header = content[0]
header = parse_row(header)
data = []
for line in content[1:]:
if line.strip():
row = parse_row(line.strip())
data.append(row)
column_types = guess_type(data)
real_data = []
for row in data:
real_data.append(
[column_types[i](v) for i, v in enumerate(row)])
all_tables.append({
"table_id": i,
"column_names": header,
"column_types": column_types,
"data": real_data
})
if not all_tables:
yield
return
if self.use_table:
for i in self.use_table:
yield all_tables[i]
else:
for t in all_tables:
yield t
class ParserFactory(object):
@staticmethod
def default_parser():
return "jasmin"
@staticmethod
def available_parsers():
return ("yaml", "pipetable", "jasmin", "jasmin3", "jasmin4", "likwid",
"udc", "dsv")
@staticmethod
def create(name, namespace):
use_table = list(map(int, namespace.use_table))
if name == "jasmin3":
args = JasminParser.retrive_cmd_args(namespace)
return JasminParser(use_table, args)
elif name == "jasmin4":
args = Jasmin4Parser.retrive_cmd_args(namespace)
return Jasmin4Parser(use_table, args)
elif name == "jasmin":
args = UnifiedJasminParser.retrive_cmd_args(namespace)
return UnifiedJasminParser(use_table, args)
elif name == "likwid":
args = LikwidParser.retrive_cmd_args(namespace)
return LikwidParser(use_table, args)
elif name == "udc":
args = UdcParser.retrive_cmd_args(namespace)
return UdcParser(use_table, args)
elif name == "yaml":
args = YamlParser.retrive_cmd_args(namespace)
return YamlParser(use_table, args)
elif name == "pipetable":
args = PipetableParser.retrive_cmd_args(namespace)
return PipetableParser(use_table, args)
elif name == "dsv":
args = DsvParser.retrive_cmd_args(namespace)
return DsvParser(use_table, args)
else:
raise ValueError("Unsupported parser: %s" % name)
@staticmethod
def register_cmd_args(argparser):
group = argparser.add_argument_group("jasmin3 parser arguments")
JasminParser.register_cmd_args(group)
group = argparser.add_argument_group("jasmin4 parser arguments")
Jasmin4Parser.register_cmd_args(group)
group = argparser.add_argument_group("jasmin parser arguments")
UnifiedJasminParser.register_cmd_args(group)
group = argparser.add_argument_group("likwid parser arguments")
LikwidParser.register_cmd_args(group)
group = argparser.add_argument_group("udc parser arguments")
UdcParser.register_cmd_args(group)
group = argparser.add_argument_group("yaml parser arguments")
YamlParser.register_cmd_args(group)
group = argparser.add_argument_group("pipetable parser arguments")
PipetableParser.register_cmd_args(group)
group = argparser.add_argument_group("dsv parser arguments")
DsvParser.register_cmd_args(group)
#
# StorageBackend
#
class SqliteSerializer(object):
typemap = {
type(None): "NULL",
int: "INTEGER",
float: "REAL",
str: "TEXT",
bytes: "BLOB"
}
@staticmethod
def register_cmd_args(argparser):
pass
@staticmethod
def retrive_cmd_args(namespace):
return {}
def __init__(self, dbfile, args):
self.dbfile = dbfile
def serialize(self, data_items, column_names, column_types):
'''Dump content to database
'''
conn = sqlite3.connect(self.dbfile)
conn.execute("DROP TABLE IF EXISTS result")
conn.commit()
self.conn = conn
# Build table creation and insertion SQL statements
column_segs = []
for i, column_name in enumerate(column_names):
t = column_types[i]
assert t in SqliteSerializer.typemap
tn = SqliteSerializer.typemap[t]
column_segs.append("\"{0}\" {1}".format(column_name, tn))
create_columns_sql = ", ".join(column_segs)
create_table_sql = "CREATE TABLE result ({0})".format(
create_columns_sql)
ph_sql = ", ".join(["?"] * len(column_names))
insert_row_sql = "INSERT INTO result VALUES ({0})".format(ph_sql)
# Create table and insert data items
cur = self.conn.cursor()
cur.execute(create_table_sql)
for item in data_items:
assert isinstance(item, list) or isinstance(item, tuple)
assert len(item) == len(column_names)
cur.execute(insert_row_sql, item)
self.conn.commit()
self.conn.close()
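# A minimal usage sketch (the file name and column names are illustrative):
#
#   ser = SqliteSerializer("result.db", {})
#   ser.serialize([[0, "main", 2.5]], ["ProcId", "TimerName", "SumWallClock"],
#                 [int, str, float])
#   # -> creates a sqlite table named `result` with three typed columns.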
class PandasSerializer(object):
@staticmethod
def register_cmd_args(argparser):
argparser.add_argument("--pandas-format",
default="xlsx",
choices=("xls", "xlsx", "csv"),
help="Output file format")
@staticmethod
def retrive_cmd_args(namespace):
return {"format": namespace.pandas_format}
def __init__(self, data_file, args):
self.data_file = data_file
self.file_format = args["format"]
def serialize(self, data_items, column_names, column_types):
import numpy
import pandas
# column_types is not used because numpy automatically deduces the best
# type for each data item.
data = numpy.array(list(data_items))
frame = pandas.DataFrame(data, columns=column_names)
if self.file_format == "xls" or self.file_format == "xlsx":
frame.to_excel(self.data_file, index=False)
elif self.file_format == "csv":
frame.to_csv(self.data_file, index=False)
else:
raise RuntimeError("Unsupported output format '%s'" %
self.file_format)
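# Illustrative: PandasSerializer("result.csv", {"format": "csv"}) writes the
# same rows via pandas.DataFrame.to_csv; "xls"/"xlsx" go through to_excel.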
class SerializerFactory(object):
@staticmethod
def default_serializer():
return "sqlite3"
@staticmethod
def available_serializers():
return ("sqlite3", "pandas")
@staticmethod
def create(name, namespace):
if name == "sqlite3":
args = SqliteSerializer.retrive_cmd_args(namespace)
return SqliteSerializer(namespace.data_file, args)
elif name == "pandas":
args = PandasSerializer.retrive_cmd_args(namespace)
return PandasSerializer(namespace.data_file, args)
else:
raise ValueError("Unsupported serializer: %s" % name)
@staticmethod
def register_cmd_args(argparser):
group = argparser.add_argument_group("sqlite3 serializer arguments")
SqliteSerializer.register_cmd_args(group)
group = argparser.add_argument_group("pandas serializer arguments")
PandasSerializer.register_cmd_args(group)
#
# DataAggragator
#
# DataAggregator iterates over a list of data tables, filters unwanted columns
# and merges the results into a large data table. The tables shall have the
# same column names and types. Each table is identified by a unique id, which
# itself is an OrderedDict. All ids also share the same keys in the same order.
#
class DataAggregator(object):
def __init__(self, column_filter=None, filter_mode="include"):
assert (filter_mode in ("include", "exclude"))
self.filter = FnmatchFilter(column_filter, filter_mode)
def aggregate(self, tables):
if type(tables) is list:
tables = iter(tables)
# Probe table structure
try:
first_table = next(tables)
except StopIteration:
print("WARNING: No data tables found")
return None
table_id = first_table["id"]
table_content = first_table["content"]
all_names = list(table_id.keys()) + table_content["column_names"]
all_types = [type(x) for x in list(table_id.values())]
all_types.extend(table_content["column_types"])
ds = [i for i, n in enumerate(all_names) if self.filter.valid(n)]
column_names = [all_names[i] for i in ds]
column_types = [all_types[i] for i in ds]
def data_generator():
table_id = first_table["id"]
for item in first_table["content"]["data"]:
all_values = list(table_id.values()) + item
yield [all_values[i] for i in ds]
for table in tables:
table_id = table["id"]
for item in table["content"]["data"]:
all_values = list(table_id.values()) + item
yield [all_values[i] for i in ds]
return {
"column_names": column_names,
"column_types": column_types,
"data": data_generator()
}
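# A minimal usage sketch (table contents and column names are illustrative):
#
#   tables = [{"id": OrderedDict([("case", "c1"), ("table_id", 0)]),
#              "content": {"column_names": ["TimerName", "SumWallClock"],
#                          "column_types": [str, float],
#                          "data": [["main", 1.5]]}}]
#   agg = DataAggregator(column_filter=["case", "TimerName", "SumWallClock"],
#                        filter_mode="include")
#   result = agg.aggregate(iter(tables))
#   # result["data"] is a generator over filtered, id-prefixed rows.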
class Collector(object):
def __init__(self):
pass
def collect(self, scanner, parser, aggregator, serializer, archive):
def table_generator():
for data_file in scanner.iterfiles():
file_spec = data_file["spec"]
for tbl in parser.itertables(data_file["fullpath"]):
if not tbl:
continue
spec = OrderedDict(file_spec)
spec["table_id"] = tbl["table_id"]
yield {"id": spec, "content": tbl}
final_table = aggregator.aggregate(table_generator())
if not final_table:
return
serializer.serialize(final_table["data"], final_table["column_names"],
final_table["column_types"])
if archive:
fns = [(x["fullpath"], x["short_fn"])
for x in scanner.iterfiles(with_stdout=True)]
with tarfile.open(archive, "w:gz") as tar:
for full_fn, short_fn in fns:
if not os.path.exists(full_fn):
continue
arcname = os.path.join("result-files", short_fn)
tar.add(full_fn, arcname=arcname)
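# Illustrative wiring (main() below does exactly this): build a scanner, a
# parser, a DataAggregator and a serializer, then run
#   Collector().collect(scanner, parser, aggregator, serializer, archive=None)
# with archive=None to skip archiving.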
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("project_root", help="Test project root directory")
parser.add_argument("data_file", help="Data file to save results")
group = parser.add_argument_group("Scanner Arguments")
grp = group.add_mutually_exclusive_group()
grp.add_argument("-i",
"--include",
default=None,
nargs="+",
metavar="CASE_PATH",
help="Include only matched cases (shell wildcards)")
grp.add_argument("-e",
"--exclude",
default=None,
nargs="+",
metavar="CASE_PATH",
help="Excluded matched cases (shell wildcards)")
group.add_argument("--use-result",
default=[0],
nargs="+",
metavar="RESULT_ID",
help="Choose result files to use (as index)")
group = parser.add_argument_group("Parser Arguments")
group.add_argument("-p",
"--parser",
default=ParserFactory.default_parser(),
choices=ParserFactory.available_parsers(),
help="Parser for raw result files (default: jasmin)")
group.add_argument("--use-table",
default=[],
nargs="+",
metavar="TABLE_ID",
help="Choose which data table to use (as index)")
ParserFactory.register_cmd_args(parser)
group = parser.add_argument_group("Aggregator Arguments")
grp = group.add_mutually_exclusive_group()
grp.add_argument("-d",
"--drop-columns",
default=None,
nargs="+",
metavar="COLUMN_NAME",
help="Drop un-wanted table columns")
grp.add_argument("-k",
"--keep-columns",
default=None,
nargs="+",
metavar="COLUMN_NAME",
help="Keep only speciied table columns")
group = parser.add_argument_group("Serializer Arguments")
group.add_argument("-s",
"--serializer",
choices=SerializerFactory.available_serializers(),
default=SerializerFactory.default_serializer(),
help="Serializer to dump results (default: sqlite3)")
SerializerFactory.register_cmd_args(parser)
group = parser.add_argument_group("Archiver Arguments")
group.add_argument("-a",
"--archive",
metavar="FILE",
dest="archive",
default=None,
help="Archive output to a zip file")
args = parser.parse_args()
# make scanner
if args.include:
case_filter = args.include
filter_mode = "include"
elif args.exclude:
case_filter = args.exclude
filter_mode = "exclude"
else:
case_filter = None
filter_mode = "exclude"
use_result = list(map(int, args.use_result))
scanner = ResultScanner(args.project_root, case_filter, filter_mode,
use_result)
# make parser
parser = ParserFactory.create(args.parser, args)
# make aggregator
if args.keep_columns:
column_filter = args.keep_columns
filter_mode = "include"
elif args.drop_columns:
column_filter = args.drop_columns
filter_mode = "exclude"
else:
column_filter = None
filter_mode = "exclude"
aggregator = DataAggregator(column_filter, filter_mode)
# make serializer
serializer = SerializerFactory.create(args.serializer, args)
# assemble collector and do the actual collecting
collector = Collector()
collector.collect(scanner, parser, aggregator, serializer, args.archive)
if __name__ == "__main__":
main()
| mit |
mattilyra/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
Akshay0724/scikit-learn | examples/classification/plot_classifier_comparison.py | 45 | 5123 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
| bsd-3-clause |
Shaswat27/scipy | scipy/signal/wavelets.py | 9 | 10458 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
The 2p filter coefficients of the FIR low-pass filter.
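Examples
--------
An illustrative check of the coefficient count (added example):
>>> from scipy.signal import daub
>>> len(daub(2))
4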
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
Coefficients of the low-pass filter.
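Examples
--------
A small illustrative sign-pattern check (added example):
>>> import numpy as np
>>> from scipy.signal import qmf
>>> np.allclose(qmf(np.array([1.0, 2.0, 3.0, 4.0])), [4.0, -3.0, 2.0, -1.0])
True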
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of w.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
The complete version of the Morlet wavelet, with a correction
term to improve admissibility. For w greater than 5, the
correction term is negligible.
Note that the energy of the return wavelet is not normalised
according to s.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where r is the sampling rate.
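Examples
--------
An illustrative check of the frequency relation above (added example):
>>> M, w, s, r = 100, 5.0, 1.0, 100.0
>>> 2 * s * w * r / M
10.0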
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A (1 - x^2/a^2) exp(-x^2/2 a^2)``,
where ``A = 2/sqrt(3a)pi^1/4``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, wavelet(length,
width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 31, 1], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/mixture/dpgmm.py | 5 | 35827 | """Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos ([email protected])
# Bertrand Thirion <[email protected]>
#
# Based on mixture.py by:
# Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp, pinvh, squared_norm
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import _GMMBase
@deprecated("The function digamma is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.digamma instead.")
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
@deprecated("The function gammaln is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.gammaln instead.")
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
@deprecated("The function log_normalize is deprecated in 0.18 and "
"will be removed in 0.20.")
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
@deprecated("The function wishart_log_det is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
@deprecated("The function wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approximation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class _DPGMMBase(_GMMBase):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <dpgmm>`.
Parameters
----------
n_components: int, default 1
Number of mixture components.
covariance_type: string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha: float, default 1
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
super(_DPGMMBase, self).__init__(n_components, covariance_type,
random_state=random_state,
tol=tol, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params,
verbose=verbose)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
# Free memory and developers cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = np.cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
check_is_fitted(self, 'means_')
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def _fit(self, X, y=None):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
A initialization step is performed before entering the em
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.random_state_ = check_random_state(self.random_state)
# initialization step
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
z = np.ones((n_samples, self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state_).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + n_samples)
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
curr_logprob, z = self.score_samples(X)
current_log_likelihood = (
curr_logprob.mean() + self._logprior(z) / n_samples)
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < self.tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
if self.n_iter == 0:
# Need to make sure that there is a z value to output
# Output zeros because it was just a quick initialization
z = np.zeros((X.shape[0], self.n_components))
self._set_weights()
return z
@deprecated("The `DPGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be "
"removed in 0.20.")
class DPGMM(_DPGMMBase):
"""Dirichlet Process Gaussian Mixture Models
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with
parameter ``weight_concentration_prior_type='dirichlet_process'``
instead.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
super(DPGMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
alpha=alpha, random_state=random_state, tol=tol, verbose=verbose,
min_covar=min_covar, n_iter=n_iter, params=params,
init_params=init_params)
@deprecated("The `VBGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. "
"VBGMM is deprecated in 0.18 and will be removed in 0.20.")
class VBGMM(_DPGMMBase):
"""Variational Inference for the Gaussian Mixture Model
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with parameter
``weight_concentration_prior_type='dirichlet_distribution'`` instead.
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <vbgmm>`.
Parameters
----------
n_components: int, default 1
Number of mixture components.
covariance_type: string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha: float, default 1
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, `n_features`) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
tol=tol, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = alpha
def _fit(self, X, y=None):
"""Estimate model parameters with the variational algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you just would like to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.alpha_ = float(self.alpha) / self.n_components
return super(VBGMM, self)._fit(X, y)
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha_ + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha_ * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha_)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha_))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha_)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
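# Hedged usage sketch (not part of the original module): fitting VBGMM on toy
# 1-D data. Parameter values are illustrative only, and the class is deprecated
# in favour of sklearn.mixture.BayesianGaussianMixture, so instantiation emits
# a deprecation warning.
def _example_vbgmm():
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.concatenate([rng.randn(50, 1), 5.0 + rng.randn(50, 1)])
    model = VBGMM(n_components=2, covariance_type='diag', n_iter=50,
                  random_state=0)
    model.fit(X)  # fit() is inherited from the GMM base class
    return model.means_, model.weights_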
| mit |
david-ragazzi/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/units.py | 70 | 4810 | """
The classes here provide support for using custom classes with
matplotlib, eg those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, eg a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation,
rather a units implementation must provide a ConversionInterface, and
then register with the Registry converter dictionary. For example,
here is a complete implementation which supports plotting with native
datetime objects
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
def convert(value, unit):
'convert value to a scalar or array'
return dates.date2num(value)
convert = staticmethod(convert)
def axisinfo(unit):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return 'date'
default_units = staticmethod(default_units)
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
import numpy as np
from matplotlib.cbook import iterable, is_numlike
class AxisInfo:
'information to support default axis labeling and tick labeling'
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
class ConversionInterface:
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
def axisinfo(unit):
'return an units.AxisInfo instance for unit'
return None
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return None
default_units = staticmethod(default_units)
def convert(obj, unit):
"""
convert obj using unit. If obj is a sequence, return the
converted sequence. The output must be a sequence of scalars
that can be used by the numpy array layer
"""
return obj
convert = staticmethod(convert)
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain-ol unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
is_numlike = staticmethod(is_numlike)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self): return None # nothing registered
#DISABLED idx = id(x)
#DISABLED cached = self._cached.get(idx)
#DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if converter is None and iterable(x):
# if this is anything but an object array, we'll assume
# there are no custom units
if isinstance(x, np.ndarray) and x.dtype != np.object:
return None
for thisx in x:
converter = self.get_converter( thisx )
return converter
#DISABLED self._cached[idx] = converter
return converter
registry = Registry()
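# Hedged usage sketch (illustrative, not part of the original module): querying
# the registry for a converter. With no converter registered for a value's
# class, get_converter returns None; after a registration such as the
# DateConverter example in the module docstring, the converter's convert()
# method can be applied.
def _example_get_converter(value):
    converter = registry.get_converter(value)
    if converter is None:
        return value
    return converter.convert(value, converter.default_units(value))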
| gpl-3.0 |
jun-wan/scilifelab | scilifelab/io/pandas/picard.py | 4 | 1924 | """pm picard lib"""
import os
import re
import pandas as pd
from scilifelab.io import index_containing_substring
import scilifelab.log
LOG = scilifelab.log.minimal_logger(__name__)
METRICS_TYPES=['align', 'hs', 'dup', 'insert']
def _raw(x):
return (x, None)
def _convert_input(x):
if re.match("^[0-9]+$", x):
return int(x)
elif re.match("^[0-9,.]+$", x):
return float(x.replace(",", "."))
else:
return str(x)
def _read_picard_metrics(f):
if not os.path.exists(f):
LOG.warn("IO failure: no such file {}".format(f))
return (None, None)
with open(f) as fh:
data = fh.readlines()
# Find histogram line
i_hist = index_containing_substring(data, "## HISTOGRAM")
if i_hist == -1:
i = len(data)
else:
i = i_hist
tmp = [[_convert_input(y) for y in x.rstrip("\n").split("\t")] for x in data[0:i] if not re.match("^[ #\n]", x)]
metrics = pd.DataFrame(tmp[1:], columns=tmp[0])
if i_hist == -1:
return (metrics, None)
tmp = [[_convert_input(y) for y in x.rstrip("\n").split("\t")] for x in data[i_hist:len(data)] if not re.match("^[ #\n]", x)]
hist = pd.DataFrame(tmp[1:], columns=tmp[0])
return (metrics, hist)
# For now: extension maps to tuple (label, description). Label should
# be reused for analysis definitions
EXTENSIONS={'.align_metrics':('align', 'alignment', _read_picard_metrics),
'.hs_metrics':('hs', 'hybrid selection', _read_picard_metrics),
'.dup_metrics':('dup', 'duplication metrics', _read_picard_metrics),
'.insert_metrics':('insert', 'insert size', _read_picard_metrics),
'.eval_metrics':('eval', 'snp evaluation', _raw)
}
def read_metrics(f):
"""Read metrics"""
(_, metrics_type) = os.path.splitext(f)
d = EXTENSIONS[metrics_type][2](f)
return d
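# Hedged usage sketch: reading a Picard metrics file by its extension. The file
# name below is hypothetical; read_metrics dispatches on the extension through
# the EXTENSIONS mapping and, for the Picard metrics types, returns a
# (metrics, histogram) pair of DataFrames, or (None, None) with a warning if
# the file does not exist.
def _example_read_metrics(metrics_file="sample.align_metrics"):
    metrics, hist = read_metrics(metrics_file)
    return metrics, hist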
| mit |
kernc/scikit-learn | sklearn/decomposition/dict_learning.py | 42 | 46134 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0):
"""Generic sparse coding
Each row of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose: int
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
# TODO: Should verbose argument be passed to this?
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars", '
'"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1, check_input=True, verbose=0):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary' * X
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose : int, optional
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if check_input:
if algorithm == 'lasso_cd':
dictionary = check_array(dictionary, order='C', dtype='float64')
X = check_array(X, order='C', dtype='float64')
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose)
# This ensures that the dimensionality of code is always 2,
# consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
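# Hedged usage sketch (illustrative shapes and parameters only): encoding random
# samples against a random, row-normalized dictionary with sparse_encode.
def _example_sparse_encode():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 8)                        # 10 samples, 8 features
    D = rng.randn(5, 8)                         # 5 dictionary atoms
    D /= np.linalg.norm(D, axis=1)[:, np.newaxis]
    code = sparse_encode(X, D, algorithm='lasso_lars', alpha=0.1)
    return code.shape                           # (10, 5)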
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
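# Hedged usage sketch: learning a small dictionary from random data with the
# batch solver. The shapes, alpha and max_iter below are illustrative
# assumptions, not recommended settings.
def _example_dict_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 8)
    code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.,
                                             max_iter=20, random_state=rng)
    return code.shape, dictionary.shape         # (30, 5), (5, 8)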
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
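# Hedged usage sketch: the online (mini-batch) variant on the same kind of
# random data; n_iter and batch_size are illustrative only.
def _example_dict_learning_online():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 8)
    code, dictionary = dict_learning_online(X, n_components=5, alpha=1.,
                                            n_iter=25, batch_size=10,
                                            random_state=rng)
    return code.shape, dictionary.shape         # (100, 5), (5, 8)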
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
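# Hedged usage sketch: transforming data with a fixed, row-normalized
# dictionary; the OMP settings below are illustrative only.
def _example_sparse_coder():
    rng = np.random.RandomState(0)
    D = rng.randn(5, 8)
    D /= np.linalg.norm(D, axis=1)[:, np.newaxis]
    coder = SparseCoder(dictionary=D, transform_algorithm='omp',
                        transform_n_nonzero_coefs=3)
    return coder.transform(rng.randn(10, 8)).shape   # (10, 5)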
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
.. versionadded:: 0.17
*cd* coordinate descent method to improve speed.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
.. versionadded:: 0.17
*lasso_cd* coordinate descent method to improve speed.
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
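# Hedged usage sketch: fitting the estimator on random data and encoding the
# same samples; parameter values are illustrative only.
def _example_dictionary_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 8)
    dico = DictionaryLearning(n_components=5, alpha=1., max_iter=20,
                              random_state=0)
    return dico.fit(X).transform(X).shape       # (30, 5)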
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
The number of iteration on data batches that has been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
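# Hedged usage sketch: incremental fitting with partial_fit on successive
# random mini-batches; batch shapes and n_iter are illustrative only.
def _example_minibatch_dictionary_learning():
    rng = np.random.RandomState(0)
    dico = MiniBatchDictionaryLearning(n_components=5, alpha=1., n_iter=10,
                                       random_state=0)
    for _ in range(3):
        dico.partial_fit(rng.randn(20, 8))
    return dico.components_.shape               # (5, 8)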
| bsd-3-clause |
chrisburr/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 14 | 39946 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
    for presort, loss in product(('auto', True, False),
                                 ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
    # Test whether the staged predictions eventually give
    # the same result as ``predict``.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
    # This will raise a DataConversionWarning that we want to
    # "always" raise; otherwise the warning gets ignored in the
    # later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
    # Test that accessing oob_improvement_ raises an AttributeError
    # when subsample=1.0 (no out-of-bag samples are available).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
    # one line per iteration for iterations 1-10, then one every
    # 10 iterations for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
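# Returning True from a monitor callback makes ``fit`` stop boosting early,
# so with this monitor only the first 10 estimators are built.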
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
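    # For the exponential loss, class probabilities are recovered from the
    # decision function through a sigmoid of twice the score; the assertion
    # below checks exactly that relationship.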
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(loss=loss, n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
if isinstance(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
dmitriy-serdyuk/EncDecASR | scripts/evaluate.py | 18 | 2305 | #!/usr/bin/env python
import numpy
import pandas
import argparse
import matplotlib
import logging
matplotlib.use("Agg")
from matplotlib import pyplot
logger = logging.getLogger()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--start", type=int, default=0, help="Start from this iteration")
parser.add_argument("--finish", type=int, default=10 ** 9, help="Finish with that iteration")
parser.add_argument("--window", type=int, default=100, help="Window width")
parser.add_argument("--hours", action="store_true", default=False, help="Display time on X-axis")
parser.add_argument("--legend", default=None, help="Legend to use in plot")
parser.add_argument("--y", default="log2_p_expl", help="What to plot")
parser.add_argument("timings", nargs="+", help="Path to timing files")
parser.add_argument("plot_path", help="Path to save plot")
return parser.parse_args()
def load_timings(path, args, y):
logging.debug("Loading timings from {}".format(path))
tm = numpy.load(path)
num_steps = min(tm['step'], args.finish)
df = pandas.DataFrame({k : tm[k] for k in [y, 'time_step']})[args.start:num_steps]
one_step = df['time_step'].median() / 3600.0
logging.debug("Median time for one step is {} hours".format(one_step))
if args.hours:
df.index = (args.start + numpy.arange(0, df.index.shape[0])) * one_step
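    # Smooth the curve with a rolling mean and drop the first `window` rows,
    # which do not yet have a full window of history behind them.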
return pandas.rolling_mean(df, args.window).iloc[args.window:]
if __name__ == "__main__":
args = parse_args()
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
args.y = args.y.split(',')
if len(args.y) < 2:
args.y = [args.y[0]] * len(args.timings)
datas = [load_timings(path, args, y) for path,y in zip(args.timings,args.y)]
for path, y, data in zip(args.timings, args.y, datas):
pyplot.plot(data.index, data[y])
print "Average {} is {} after {} {} for {}".format(
y, data[y].iloc[-1],
data.index[-1], "hours" if args.hours else "iterations", path)
pyplot.xlabel("hours" if args.hours else "iterations")
pyplot.ylabel("log_2 likelihood")
pyplot.legend(args.legend.split(",") if args.legend else range(len(datas)))
pyplot.savefig(args.plot_path)
| bsd-3-clause |
CforED/Machine-Learning | sklearn/metrics/tests/test_score_objects.py | 17 | 14051 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
kagayakidan/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 126 | 13591 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and test
sets. The compressed dataset is around 14 MB; once uncompressed, the train
set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays; instead it returns the raw posts together with their filenames and
their categories as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
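# For example, lines such as "> I agree" or "John Doe writes:" match
# _QUOTE_RE above and are therefore dropped by strip_newsgroup_quoting.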
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
lakshayg/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 33 | 5463 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using convolutional networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat of an alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_cnn_model(features, labels, mode):
"""Character level convolutional neural network model to predict classes."""
features_onehot = tf.one_hot(features[CHARS_FEATURE], 256)
input_layer = tf.reshape(
features_onehot, [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
input_layer,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
# Add a ReLU for non linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
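# Hedged shape walk-through of char_cnn_model above (B denotes the batch size;
# the values follow from the constants defined in this file):
#   one-hot input            [B, 100, 256, 1]
#   conv1  (20x256, VALID)   [B,  81,   1, 10]
#   pool1  (4x4, s=2, SAME)  [B,  41,   1, 10]  -> transposed to [B, 41, 10, 1]
#   conv2  (20x10,  VALID)   [B,  22,   1, 10]
#   reduce_max + squeeze     [B,  10]
#   dense logits             [B,  15]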
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
x_train = x_train.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
x_test = x_test.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
# Build model
classifier = tf.estimator.Estimator(model_fn=char_cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=128,
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy: {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
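# Hedged usage sketch (flag defined by the argparse parser above; the DBpedia
# dataset is downloaded on first run via tf.contrib.learn.datasets):
#   python text_classification_character_cnn.py --test_with_fake_data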
| apache-2.0 |
kmike/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 2 | 29314 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.decision_function(self, X, *args, **kw)
def predict_proba(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.predict_proba(self, X, *args, **kw)
def predict_log_proba(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.predict_log_proba(self, X, *args, **kw)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
##
## Test Data
##
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
##
## Classification Test Case
##
class CommonTest(object):
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
#... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
"""Test multiple calls of fit w/ different shaped inputs."""
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
def test_input_format(self):
"""Input format tests. """
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
Y_ = np.array(Y)[:, np.newaxis]
clf.fit(X, Y_)
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
"""Test whether clone works ok. """
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory = SGDClassifier
def test_sgd(self):
"""Check that SGD gives any results :-)"""
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
#assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
"""Check whether expected ValueError on bad l1_ratio"""
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
"""Check whether expected ValueError on bad learning_rate"""
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
"""Check whether expected ValueError on bad eta0"""
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
"""Check whether expected ValueError on bad alpha"""
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
"""Test parameter validity check"""
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
"""Test parameter validity check"""
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
"""Checks coef_init not allowed as model argument (only fit)"""
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
"""Checks coef_init shape for the warm starts"""
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
"""Checks intercept_ shape for the warm starts"""
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
"""Checks intercept_ shape for the warm starts in binary case"""
self.factory().fit(X5, Y5, intercept_init=0)
def test_set_intercept_to_intercept(self):
"""Checks intercept_ shape consistency for the warm starts"""
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
"""Target must have at least two labels"""
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_sgd_multiclass(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_with_init_coef(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
"""Multi-class test case with multi-core support"""
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
"""Checks coef_init and intercept_init shape for for multi-class
problems"""
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
"""Check SGD.predict_proba"""
# hinge loss does not allow for conditional prob estimate
clf = self.factory(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_raises(NotImplementedError, clf.predict_proba, [3, 2])
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1/3.] * 3)
def test_sgd_l1(self):
"""Test L1 regularization"""
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
        Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
"""Test if equal class weights approx. equals no class weights. """
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
"""ValueError due to not existing class label."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
"""ValueError due to wrong class_weight argument type."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_auto_weight(self):
"""Test class weights for imbalanced data"""
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X)), 0.96,
decimal=1)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001, n_iter=1000,
class_weight="auto").fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X)), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
        # build a very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred), 0.96)
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000, class_weight="auto")
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="auto")
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred), 0.96)
def test_sample_weights(self):
"""Test weights on individual samples"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give small weights to the samples of class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
"""Test if ValueError is raised if sample_weight has wrong shape"""
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase):
"""Test suite for the dense representation variant of SGD"""
factory = SGDRegressor
def test_sgd(self):
"""Check that SGD gives any results."""
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
"""Check that the SGD ouput is consistent with coordinate descent"""
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = SparseSGDRegressor
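# Hedged usage sketch: this module follows the nose-style test layout that
# scikit-learn used at the time; a typical invocation (command assumed) is
#   nosetests sklearn/linear_model/tests/test_sgd.py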
| bsd-3-clause |
mwong009/iclv_rbm | run_mnl.py | 1 | 7767 | import pickle
import timeit
import numpy as np
import pandas as pd
import theano
import theano.tensor as T
from theano import shared, function
from theano.tensor.shared_randomstreams import RandomStreams
from models import optimizers
from models.rum import Logistic
from models.preprocessing import extractdata
""" Custom options """
floatX = theano.config.floatX
pd.set_option('display.float_format', lambda x: '%.3f' % x)
pd.options.display.max_rows = 999
csvString = 'data/US_SP_Restructured.csv'
def run_mnl():
""" Discrete choice model estimation with Theano
Setup
-----
step 1: Load variables from csv file
step 2: Define hyperparameters used in the computation
step 3: define symbolic Theano tensors
step 4: build model and define cost function
step 5: define gradient calculation algorithm
step 6: define Theano symbolic functions
step 7: run main estimaiton loop for n iterations
step 8: perform analytics and model statistics
"""
    # compile and import dataset from csv
d_x_ng, d_x_g, d_y, avail, d_ind = extractdata(csvString)
data_x_ng = shared(np.asarray(d_x_ng, dtype=floatX), borrow=True)
data_x_g = shared(np.asarray(d_x_g, dtype=floatX), borrow=True)
data_y = T.cast(shared(np.asarray(d_y-1, dtype=floatX), borrow=True),
'int32')
data_av = shared(np.asarray(avail, dtype=floatX), borrow=True)
sz_n = d_x_g.shape[0] # number of samples
sz_k = d_x_g.shape[1] # number of generic variables
sz_m = d_x_ng.shape[2] # number of non-generic variables
sz_i = d_x_ng.shape[1] # number of alternatives
sz_minibatch = sz_n # model hyperparameters
learning_rate = 0.3
momentum = 0.9
x_ng = T.tensor3('data_x_ng') # symbolic theano tensors
x_g = T.matrix('data_x_g')
y = T.ivector('data_y')
av = T.matrix('data_av')
index = T.lscalar('index')
# construct model
model = Logistic(
sz_i, av,
input=[x_ng, x_g],
n_in=[(sz_m,), (sz_k, sz_i)])
cost = - model.loglikelihood(y)
# calculate the gradients wrt to the loss function
grads = T.grad(cost=cost, wrt=model.params)
opt = optimizers.adadelta(model.params, model.masks, momentum)
updates = opt.updates(
model.params, grads, learning_rate)
# hessian function
fn_hessian = function(
inputs=[],
outputs=T.hessian(cost=cost, wrt=model.params),
givens={
x_ng: data_x_ng,
x_g: data_x_g,
y: data_y,
av: data_av},
on_unused_input='ignore')
# null loglikelihood function
fn_null = function(
inputs=[],
outputs=model.loglikelihood(y),
givens={
x_ng: data_x_ng,
x_g: data_x_g,
y: data_y,
av: data_av},
on_unused_input='ignore')
# compile the theano functions
fn_estimate = function(
name='estimate',
inputs=[index],
outputs=[model.loglikelihood(y), model.errors(y)],
updates=updates,
givens={
x_ng: data_x_ng[
index*sz_minibatch: T.min(((index+1)*sz_minibatch, sz_n))],
x_g: data_x_g[
index*sz_minibatch: T.min(((index+1)*sz_minibatch, sz_n))],
y: data_y[
index*sz_minibatch: T.min(((index+1)*sz_minibatch, sz_n))],
av: data_av[
index*sz_minibatch: T.min(((index+1)*sz_minibatch, sz_n))]},
allow_input_downcast=True,
on_unused_input='ignore',)
""" Main estimation process loop """
print('Begin estimation...')
epoch = 0 # process loop parameters
sz_epoches = 9999
sz_batches = np.ceil(sz_n/sz_minibatch).astype(np.int32)
done_looping = False
patience = 300
patience_inc = 10
best_loglikelihood = -np.inf
null_Loglikelihood = fn_null()
start_time = timeit.default_timer()
while epoch < sz_epoches and done_looping is False:
epoch_error = []
epoch_loglikelihood = []
for i in range(sz_batches):
(batch_loglikelihood, batch_error) = fn_estimate(i)
epoch_error.append(batch_error)
epoch_loglikelihood.append(batch_loglikelihood)
this_loglikelihood = np.sum(epoch_loglikelihood)
print('@ iteration %d loglikelihood: %.3f'
% (epoch, this_loglikelihood))
if this_loglikelihood > best_loglikelihood:
if this_loglikelihood > 0.997 * best_loglikelihood:
patience += patience_inc
best_loglikelihood = this_loglikelihood
with open('best_model.pkl', 'wb') as f:
pickle.dump(model, f)
if epoch > patience:
done_looping = True
epoch += 1
final_Loglikelihood = best_loglikelihood
rho_square = 1.-(final_Loglikelihood/null_Loglikelihood)
end_time = timeit.default_timer()
""" Analytics and model statistics """
print('... solving Hessians')
h = np.hstack([np.diagonal(mat) for mat in fn_hessian()])
n_est_params = np.count_nonzero(h)
aic = 2 * n_est_params - 2 * final_Loglikelihood
bic = np.log(sz_n) * n_est_params - 2 * final_Loglikelihood
print('@iteration %d, run time %.3f '
% (epoch, end_time-start_time))
print('Null Loglikelihood: %.3f'
% null_Loglikelihood)
print('Final Loglikelihood: %.3f'
% final_Loglikelihood)
print('rho square %.3f'
% rho_square)
print('AIC %.3f'
% aic)
print('BIC %.3f'
% bic)
with open('best_model.pkl', 'rb') as f:
best_model = pickle.load(f)
run_analytics(best_model, h)
def run_analytics(model, hessians):
stderr = 2 / np.sqrt(hessians)
betas = np.concatenate(
[param.get_value() for param in model.params], axis=0)
t_stat = betas/stderr
data = np.vstack((betas, stderr, t_stat)).T
columns = ['betas', 'serr', 't_stat']
paramNames = [] # print dataFrame
choices = ['Bus', 'CarRental', 'Car', 'Plane', 'TrH', 'Train']
ASCs = []
for choice in choices:
ASCs.append('ASC_'+choice)
nongenericNames = ['cost', 'tt', 'relib']
genericNames = [
'DrvLicens', 'PblcTrst',
'Ag1825', 'Ag2545', 'Ag4565', 'Ag65M', 'Male', 'Fulltime',
'PrtTime', 'Unemplyd', 'Edu_Highschl', 'Edu_BSc', 'Edu_MscPhD',
'HH_Veh0', 'HH_Veh1', 'HH_Veh2M',
'HH_Adult1', 'HH_Adult2', 'HH_Adult3M',
'HH_Chld0', 'HH_Chld1', 'HH_Chld2M',
'HH_Inc020K', 'HH_Inc2060K', 'HH_Inc60KM',
# 'HH_Sngl', 'HH_SnglParent', 'HH_AllAddults',
# 'HH_Nuclear', 'P_Chld',
# 'O_MTL_US_max', 'O_Odr_US_max',
# 'D_Bstn_max', 'D_NYC_max', 'D_Maine_max',
# 'Tp_Onewy_max', 'Tp_2way_max',
# 'Tp_h06_max', 'Tp_h69_max', 'Tp_h915_max',
# 'Tp_h1519_max', 'Tp_h1924_max', 'Tp_h1524_max',
# 'Tp_Y2016_max', 'Tp_Y2017_max',
# 'Tp_Wntr_max', 'Tp_Sprng_max', 'Tp_Sumr_max', 'Tp_Fall_max',
# 'Tp_CarDrv_max', 'Tp_CarPsngr_max', 'Tp_CarShrRnt_max',
# 'Tp_Train_max', 'Tp_Bus_max',
# 'Tp_Plane_max', 'Tp_ModOdr_max',
# 'Tp_WrkSkl_max', 'Tp_Leisr_max',
# 'Tp_Shpng_max', 'Tp_ActOdr_max',
# 'Tp_NHotel1_max', 'Tp_NHotel2_max', 'Tp_NHotel3M_max',
# 'Tp_FreqMonthlMulti_max', 'Tp_FreqYearMulti_max'
# 'Tp_FreqYear1_max',
]
for ASC in ASCs:
paramNames.append(ASC)
for name in nongenericNames:
paramNames.append(name)
for name in genericNames:
for choice in choices:
paramNames.append(name+'_'+choice)
df = pd.DataFrame(data, paramNames, columns)
print(df)
if __name__ == '__main__':
run_mnl()
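# Hedged post-estimation sketch (file name matches the pickle dump in run_mnl):
#   import pickle
#   with open('best_model.pkl', 'rb') as f:
#       model = pickle.load(f)
#   betas = [p.get_value() for p in model.params]   # estimated coefficients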
| gpl-3.0 |
aerler/GeoPy | src/utils/ascii.py | 1 | 31520 | '''
Created on Jan 4, 2017
A module to load ASCII raster data into numpy arrays.
@author: Andre R. Erler, GPL v3
'''
# external imports
import numpy as np
import numpy.ma as ma
import gzip, shutil, tempfile
import os, gc
# internal imports
from geodata.base import Variable, Axis, Dataset
from geodata.gdal import addGDALtoDataset, addGDALtoVar, getAxes
from geodata.misc import AxisError, ArgumentError, NetCDFError
from utils.misc import flip, expandArgumentList
from utils.nctools import coerceAtts
# the environment variable RAMDISK contains the path to the RAM disk
ramdisk = os.getenv('RAMDISK', None)
if ramdisk and not os.path.exists(ramdisk):
raise IOError(ramdisk)
# helper function
def samplingUnits(sampling):
''' convert Pandas sampling letter to time units'''
if sampling == 's': units = 'seconds'
elif sampling == 'm': units = 'minutes'
elif sampling == 'D': units = 'days'
elif sampling == 'M': units = 'months'
elif sampling == 'Y': units = 'years'
else:
raise NotImplementedError(sampling)
return units
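# e.g. (follows directly from the mapping above):
#   samplingUnits('D') -> 'days';  samplingUnits('M') -> 'months'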
## functions to convert a raster dataset to NetCDF (time-step by time-step)
def convertRasterToNetCDF(filepath=None, raster_folder=None, raster_path_func=None, start_date=None, end_date=None, sampling='M',
ds_atts=None, vardefs=None, projection=None, geotransform=None, size=None, griddef=None,
lgzip=None, lgdal=True, lmask=True, lskipMissing=True, lfeedback=True, var_start_idx=None,
loverwrite=False, use_netcdf_tools=True, lzlib=True, **ncargs):
''' function to load a set of raster variables that are stored in a systematic directory tree into a NetCDF dataset
Variables are defined as follows:
vardefs[varname] = dict(name=string, units=string, axes=tuple of strings, atts=dict, plot=dict, dtype=np.dtype, fillValue=value)
Currently, only the horizontal raster axes and a datetime axis are supported; the former are inferred from griddef and
the latter is constructed from start_date, end_date and sampling and stored following CF convention.
The path to raster files is constructed as raster_folder+raster_path, where raster_path is the output of
raster_path_func(datetime, varname, **varatts), which has to be defined by the user.
'''
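  # Hedged illustration of the expected arguments (variable name, folder layout
  # and scale factor are made up):
  #   vardefs = dict(precip=dict(name='precip', units='mm/day', dtype=np.float32,
  #                              fillValue=-9999., scalefactor=86400.))
  #   def raster_path_func(datetime, varname, **varatts):
  #       return '{0:s}/{1:%Y}/{0:s}_{1:%Y%m%d}.asc.gz'.format(varname, datetime)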
import pandas as pd
import netCDF4 as nc
from utils.nctools import add_coord, add_var, checkFillValue
## open NetCDF dataset
if isinstance(filepath,str) and ( loverwrite or not os.path.exists(filepath) ):
# generate list of datetimes from end dates and frequency
datetime64_array = np.arange(start_date,end_date, dtype='datetime64[{}]'.format(sampling))
xlon, ylat = (griddef.xlon, griddef.ylat) # used for checking
if use_netcdf_tools:
from geospatial.netcdf_tools import createGeoNetCDF
ncds = createGeoNetCDF(filepath, atts=ds_atts, time=datetime64_array, varatts=None, # default atts
crs=projection, geotrans=geotransform, size=size, griddef=griddef, # probably griddef...
nc_format=ncargs.get('format','NETCDF4'), zlib=lzlib, loverwrite=loverwrite)
else:
if 'format' not in ncargs: ncargs['format'] = 'NETCDF4'
ncargs['clobber'] = loverwrite
if loverwrite: ncargs['mode'] = 'w'
ncds = nc.Dataset(filepath, **ncargs)
# setup horizontal dimensions and coordinate variables
for ax in (xlon, ylat):
add_coord(ncds, name=ax.name, data=ax.coord, atts=ax.atts, zlib=lzlib)
# create time dimension and coordinate variable
start_datetime = datetime64_array[0]
time_coord = ( ( datetime64_array - start_datetime ) / np.timedelta64(1,sampling) ).astype('int64')
time_units_name = samplingUnits(sampling)
time_units = time_units_name + ' since ' + str(start_datetime)
tatts = dict(units=time_units, long_name=time_units_name.title(), sampling=sampling, start_date=start_date, end_date=end_date)
add_coord(ncds, name='time', data=time_coord, atts=tatts, zlib=lzlib)
# add attributes
if ds_atts is not None:
for key,value in coerceAtts(ds_atts).items():
ncds.setncattr(key,value)
elif isinstance(filepath,nc.Dataset) or ( isinstance(filepath,str) and os.path.exists(filepath) ):
assert not loverwrite, filepath
if isinstance(filepath,str):
# open exising dataset
ncargs['clobber'] = False
ncargs['mode'] = 'a' # append
ncds = nc.Dataset(filepath, **ncargs)
else:
ncds = filepath
# check mapping stuff
if griddef is None:
raise NotImplementedError("Inferring GridDef from file is not implemented yet.")
assert griddef.isProjected == ncds.getncattr('is_projected'), griddef.projection.ExportToProj4()
xlon, ylat = (griddef.xlon, griddef.ylat)
for ax in (xlon, ylat):
assert ax.name in ncds.dimensions, ax.name
assert len(ax) == len(ncds.dimensions[ax.name]), ax.name
assert ax.name in ncds.variables, ax.name
# set time interval
if start_date is not None:
print("Overwriting start date with NetCDF start date.")
start_date = ncds.getncattr('start_date')
if lfeedback: print("Setting start date to:",start_date)
start_date_dt = pd.to_datetime(start_date)
# check existing time axis
assert 'time' in ncds.dimensions, ncds.dimensions
assert 'time' in ncds.variables, ncds.variables
nc_end_date_dt = pd.to_datetime(ncds.getncattr('end_date'))
# check consistency of dates
days = (nc_end_date_dt - start_date_dt).days # timedelta
if sampling.upper() == 'D': time_len = days
else: raise NotImplementedError(sampling)
if time_len != len(ncds.dimensions['time']):
raise NetCDFError("Registered start and end dates are inconsistent with array length: {} != {}".format(time_len,len(ncds.dimensions['time'])))
# generate list of datetimes from end dates and frequency
datetime64_array = np.arange(start_date,end_date, dtype='datetime64[{}]'.format(sampling))
# verify time coordinate
start_datetime = datetime64_array[0]
time_coord = ( ( datetime64_array - start_datetime ) / np.timedelta64(1,sampling) ).astype('int64')
time_units = samplingUnits(sampling) + ' since ' + str(start_datetime)
tvar = ncds.variables['time']
assert tvar.units == time_units, tvar
assert np.all(tvar[:time_len] == time_coord[:time_len]), time_coord
# update time coordinate
tvar[time_len:] = time_coord[time_len:]
# update time stamps
if 'time_stamp' in ncds.variables:
ncds.variables['time_stamp'][time_len:] = np.datetime_as_string(datetime64_array[time_len:], unit=sampling)
else:
raise TypeError(filepath)
# add variables
if var_start_idx is None: var_start_idx = dict()
dimlist = ('time', ncds.ylat, ncds.xlon)
shape = tuple(len(ncds.dimensions[dim]) for dim in dimlist) # xlon is usually the innermost dimension
var_shp = (None,)+shape[1:] # time dimension remains extendable
for varname,varatts in vardefs.items():
nc_name = varatts.get('name',varname)
if nc_name in ncds.variables:
ncvar = ncds.variables[nc_name]
assert ncvar.shape == shape, shape # time dim is already updated/extended due to time coordinate
assert ncvar.dimensions == ('time',ylat.name,xlon.name), ('time',ylat.name,xlon.name)
if varname not in var_start_idx:
var_start_idx[varname] = time_len # where to start writing new data (can vary by variable)
else:
atts = dict(original_name=varname, units=varatts['units'])
add_var(ncds, name=nc_name, dims=dimlist, data=None, shape=var_shp, atts=atts,
dtype=varatts['dtype'], zlib=lzlib, lusestr=True)
var_start_idx[varname] = 0
ncds.sync()
#print(filepath)
#print(ncds)
if not lmask:
raise NotImplementedError("Need to handle missing values without mask - use lna=True?")
# get fillValues
fillValues = {varname:varatts.get('fillValue',None) for varname,varatts in vardefs.items()}
## loop over datetimes
for i,dt64 in enumerate(datetime64_array):
datetime = pd.to_datetime(dt64)
## loop over variables
for varname,varatts in vardefs.items():
i0 = var_start_idx[varname]
# skip existing data
if i >= i0:
# actually add data now
if i0 == i and i > 0 and lfeedback:
print("{}: appending after {} timesteps.".format(varname,i))
nc_name = varatts.get('name',varname)
fillValue = fillValues[varname]
# construct file names
raster_path = raster_path_func(datetime, varname, **varatts)
raster_path = raster_folder + raster_path
if lfeedback: print(raster_path)
# load raster data and save to NetCDF
if lgzip is None: # will only trigger once
lgzip = raster_path.endswith('.gz')
if os.path.exists(raster_path):
raster_data, geotrans, nodata = readASCIIraster(raster_path, lgzip=lgzip, lgdal=lgdal, dtype=varatts.get('dtype',np.float32),
lmask=lmask, fillValue=None, lgeotransform=True, lna=True)
assert all(np.isclose(geotrans,griddef.geotransform)), geotrans
if fillValue is None:
fillValues[varname] = nodata # remember for next field
elif fillValue != nodata:
raise NotImplementedError('No data/fill values need to be consistent: {} != {}'.format(fillValue,nodata))
# scale, if appropriate
if 'scalefactor' in varatts: raster_data *= varatts['scalefactor']
if 'offset' in varatts: raster_data += varatts['offset']
elif lskipMissing:
print("Skipping missing raster: '{}'".format(raster_path))
# create an array of missing data
if lmask:
raster_data = np.ma.masked_all(shape=var_shp[1:], dtype=varatts['dtype']) # no time dim; all masked
elif fillValue is not None:
raster_data = np.full(shape=var_shp[1:], fill_value=fillValue, dtype=varatts['dtype']) # no time dim; all filled
else:
NotImplementedError("Need to be able to generate missing data in order to skip missing raster.")
# save data to NetCDF
if lmask:
if fillValue is not None: raster_data = raster_data.filled(fillValue)
else: raster_data = raster_data.filled() # hopefully default was set...
ncds.variables[nc_name][i,:,:] = raster_data
## maybe compute some derived variables?
# set missing value flags (fillValue may have to be inferred from ascii, hence set last)
for varname,fillValue in fillValues.items():
nc_name = vardefs[varname].get('name',varname)
ncvar = ncds.variables[nc_name]
# make sure fillValue is OK (there have been problems...)
fillValue = checkFillValue(fillValue, ncvar.dtype)
if fillValue is not None:
ncvar.setncattr('missing_value',fillValue) # I use fillValue and missing_value the same way
# close file
ncds.sync(); ncds.close()
return filepath
## functions to construct Variables and Datasets from ASCII raster data
def rasterDataset(name=None, title=None, vardefs=None, axdefs=None, atts=None, projection=None, griddef=None,
lgzip=None, lgdal=True, lmask=True, fillValue=None, lskipMissing=True, lgeolocator=True,
file_pattern=None, lfeedback=True, **kwargs):
''' function to load a set of variables that are stored in raster format in a systematic directory tree into a Dataset
Variables and Axis are defined as follows:
vardefs[varname] = dict(name=string, units=string, axes=tuple of strings, atts=dict, plot=dict, dtype=np.dtype, fillValue=value)
axdefs[axname] = dict(name=string, units=string, atts=dict, coord=array or list) or None
The path to raster files is constructed as variable_pattern+axes_pattern, where axes_pattern is defined through the axes,
(as in rasterVarialbe) and variable_pattern takes the special keywords VAR, which is the variable key in vardefs.
'''
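  # Hedged illustration (names are made up): one variable with a time axis plus
  # the two horizontal map axes, which are inferred from the rasters:
  #   axdefs = dict(time=dict(name='time', units='month', coord=list(range(1, 13))))
  #   vardefs = dict(precip=dict(name='precip', units='mm/day', dtype=np.float32,
  #                              axes=('time', None, None),
  #                              path_params=dict(VAR='precip')))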
## prepare input data and axes
if griddef:
xlon,ylat = griddef.xlon,griddef.ylat
if projection is None:
projection = griddef.projection
elif projection != griddef.projection:
raise ArgumentError("Conflicting projection and GridDef!")
geotransform = griddef.geotransform
isProjected = griddef.isProjected
else:
xlon = ylat = geotransform = None
isProjected = False if projection is None else True
# construct axes dict
axes = dict()
for axname,axdef in list(axdefs.items()):
assert 'coord' in axdef, axdef
assert ( 'name' in axdef and 'units' in axdef ) or 'atts' in axdef, axdef
if axdef is None:
axes[axname] = None
else:
ax = Axis(**axdef)
axes[ax.name] = ax
# check for map Axis
if isProjected:
if 'x' not in axes: axes['x'] = xlon
if 'y' not in axes: axes['y'] = ylat
else:
if 'lon' not in axes: axes['lon'] = xlon
if 'lat' not in axes: axes['lat'] = ylat
## load raster data into Variable objects
varlist = []
for varname,vardef in list(vardefs.items()):
# check definitions
assert 'axes' in vardef and 'dtype' in vardef, vardef
assert ( 'name' in vardef and 'units' in vardef ) or 'atts' in vardef, vardef
# determine relevant axes
vardef = vardef.copy()
axes_list = [None if ax is None else axes[ax] for ax in vardef.pop('axes')]
# define path parameters (with varname)
path_params = vardef.pop('path_params',None)
path_params = dict() if path_params is None else path_params.copy()
if 'VAR' not in path_params: path_params['VAR'] = varname # a special key
# add kwargs and relevant axis indices
relaxes = [ax.name for ax in axes_list if ax is not None] # relevant axes
for key,value in list(kwargs.items()):
if key not in axes or key in relaxes:
vardef[key] = value
# create Variable object
var = rasterVariable(projection=projection, griddef=griddef, file_pattern=file_pattern, lgzip=lgzip, lgdal=lgdal,
lmask=lmask, lskipMissing=lskipMissing, axes=axes_list, path_params=path_params,
lfeedback=lfeedback, **vardef)
# vardef components: name, units, atts, plot, dtype, fillValue
varlist.append(var)
# check that map axes are correct
for ax in var.xlon,var.ylat:
if axes[ax.name] is None: axes[ax.name] = ax
elif axes[ax.name] != ax: raise AxisError("{} axes are incompatible.".format(ax.name))
if griddef is None: griddef = var.griddef
elif griddef != var.griddef: raise AxisError("GridDefs are inconsistent.")
if geotransform is None: geotransform = var.geotransform
elif geotransform != var.geotransform:
raise AxisError("Conflicting geotransform (from Variable) and GridDef!\n {} != {}".format(var.geotransform,geotransform))
## create Dataset
# create dataset
dataset = Dataset(name=name, title=title, varlist=varlist, axes=axes, atts=atts)
# add GDAL functionality
dataset = addGDALtoDataset(dataset, griddef=griddef, projection=projection, geotransform=geotransform, gridfolder=None,
lwrap360=None, geolocator=lgeolocator, lforce=False)
# N.B.: for some reason we also need to pass the geotransform, otherwise it is recomputed internally and some consistency
# checks fail due to machine-precision differences
# return GDAL-enabled Dataset
return dataset
def rasterVariable(name=None, units=None, axes=None, atts=None, plot=None, dtype=None, projection=None, griddef=None,
file_pattern=None, lgzip=None, lgdal=True, lmask=True, fillValue=None, lskipMissing=True,
path_params=None, offset=0, scalefactor=1, transform=None, time_axis=None, lfeedback=False, **kwargs):
''' function to read multi-dimensional raster data and construct a GDAL-enabled Variable object '''
# print status
if lfeedback: print("Loading variable '{}': ".format(name), end=' ') # no newline
## figure out axes arguments and load data
# figure out axes (list/tuple of axes has to be ordered correctly!)
axes_list = [ax.name for ax in axes[:-2]]
# N.B.: the last two axes are the two horizontal map axes (x&y); they can be None and will be inferred from raster
# N.B.: coordinate values can be overridden with keyword arguments, but length must be consistent
# figure out coordinates for axes
for ax in axes[:-2]:
if ax.name in kwargs:
# just make sure the dimensions match, but use keyword argument
if not len(kwargs[ax.name]) == len(ax):
raise AxisError("Length of Variable axis and raster file dimension have to be equal.")
else:
# use Axis coordinates and add to kwargs for readRasterArray call
kwargs[ax.name] = tuple(ax.coord)
# load raster data
if lfeedback: print(("'{}'".format(file_pattern)))
data, geotransform = readRasterArray(file_pattern, lgzip=lgzip, lgdal=lgdal, dtype=dtype, lmask=lmask,
fillValue=fillValue, lgeotransform=True, axes=axes_list, lna=False,
lskipMissing=lskipMissing, path_params=path_params, lfeedback=lfeedback, **kwargs)
# shift and rescale
if offset != 0: data += offset
if scalefactor != 1: data *= scalefactor
## create Variable object and add GDAL
# check map axes and generate if necessary
xlon, ylat = getAxes(geotransform, xlen=data.shape[-1], ylen=data.shape[-2],
projected=griddef.isProjected if griddef else bool(projection))
axes = list(axes)
if axes[-1] is None: axes[-1] = xlon
elif len(axes[-1]) != len(xlon): raise AxisError(axes[-1])
if axes[-2] is None: axes[-2] = ylat
elif len(axes[-2]) != len(ylat): raise AxisError(axes[-2])
# create regular Variable with data in memory
var = Variable(name=name, units=units, axes=axes, data=data, dtype=dtype, mask=None, fillValue=fillValue,
atts=atts, plot=plot)
# apply transform (if any), now that we have axes etc.
if transform is not None: var = transform(var=var, time_axis=time_axis)
# add GDAL functionality
if griddef is not None:
# perform some consistency checks ...
if projection is None:
projection = griddef.projection
elif projection != griddef.projection:
raise ArgumentError("Conflicting projection and GridDef!\n {} != {}".format(projection,griddef.projection))
if not np.isclose(geotransform, griddef.geotransform).all():
raise ArgumentError("Conflicting geotransform (from raster) and GridDef!\n {} != {}".format(geotransform,griddef.geotransform))
# ... and use provided geotransform (due to issues with numerical precision, this is usually better)
geotransform = griddef.geotransform # if we don't pass the geotransform explicitly, it will be recomputed from the axes
# add GDAL functionality
var = addGDALtoVar(var, griddef=griddef, projection=projection, geotransform=geotransform, gridfolder=None)
# return final, GDAL-enabled variable
return var
## functions to load ASCII raster data
def readRasterArray(file_pattern, lgzip=None, lgdal=True, dtype=np.float32, lmask=True, fillValue=None, lfeedback=False,
lgeotransform=True, axes=None, lna=False, lskipMissing=False, path_params=None, **kwargs):
''' function to load a multi-dimensional numpy array from several structured ASCII raster files '''
if axes is None: raise NotImplementedError
#TODO: implement automatic detection of axes arguments and axes order
## expand path argument and figure out dimensions
# collect axes arguments
shape = []; axes_kwargs = dict()
for ax in axes:
if ax not in kwargs: raise AxisError(ax)
coord = kwargs.pop(ax)
shape.append(len(coord))
axes_kwargs[ax] = coord
assert len(axes) == len(shape) == len(axes_kwargs)
shape = tuple(shape)
#TODO: add handling of embedded inner product expansion
# argument expansion using outer product
file_kwargs_list = expandArgumentList(outer_list=axes, **axes_kwargs)
assert np.prod(shape) == len(file_kwargs_list)
## load data from raster files and assemble array
path_params = dict() if path_params is None else path_params.copy() # will be modified
# find first valid 2D raster to determine shape
i0 = 0
path_params.update(file_kwargs_list[i0]) # update axes parameters
filepath = file_pattern.format(**path_params) # construct file name
if not os.path.exists(filepath):
if lskipMissing: # find first valid
while not os.path.exists(filepath):
i0 += 1 # go to next raster file
if i0 >= len(file_kwargs_list):
raise IOError("No valid input raster files found!\n'{}'".format(filepath))
if lfeedback: print(' ', end=' ')
path_params.update(file_kwargs_list[i0]) # update axes parameters
filepath = file_pattern.format(**path_params) # nest in line
else: # or raise error
raise IOError(filepath)
# read first 2D raster file
data2D = readASCIIraster(filepath, lgzip=lgzip, lgdal=lgdal, dtype=dtype, lna=True,
lmask=lmask, fillValue=fillValue, lgeotransform=lgeotransform, **kwargs)
if lgeotransform: data2D, geotransform0, na = data2D
else: data2D, na = data2D # we might still need na, but no need to check if it is the same
shape2D = data2D.shape # get 2D raster shape for later use
# allocate data array
list_shape = (np.prod(shape),)+shape2D # assume 3D shape to concatenate 2D rasters
if lmask:
data = ma.empty(list_shape, dtype=dtype)
if fillValue is None: data._fill_value = data2D._fill_value
else: data._fill_value = fillValue
data.mask = True # initialize everything as masked
else: data = np.empty(list_shape, dtype=dtype) # allocate the array
assert data.shape[0] == len(file_kwargs_list), (data.shape, len(file_kwargs_list))
# insert (up to) first raster before continuing
if lskipMissing and i0 > 0:
data[:i0,:,:] = ma.masked if lmask else fillValue # mask all invalid rasters up to first valid raster
data[i0,:,:] = data2D # add first (valid) raster
# loop over remaining 2D raster files
for i,file_kwargs in enumerate(file_kwargs_list[i0:]):
path_params.update(file_kwargs) # update axes parameters
filepath = file_pattern.format(**path_params) # construct file name
if os.path.exists(filepath):
if lfeedback: print('.', end=' ') # indicate data with bar/pipe
# read 2D raster file
data2D = readASCIIraster(filepath, lgzip=lgzip, lgdal=lgdal, dtype=dtype, lna=False,
lmask=lmask, fillValue=fillValue, lgeotransform=lgeotransform, **kwargs)
# check geotransform
if lgeotransform:
data2D, geotransform = data2D
if not geotransform == geotransform0:
raise AxisError(geotransform) # to make sure all geotransforms are identical!
else: geotransform = None
# size information
if not shape2D == data2D.shape:
raise AxisError(data2D.shape) # to make sure all geotransforms are identical!
# insert 2D raster into 3D array
data[i+i0,:,:] = data2D # raster shape has to match
elif lskipMissing:
# fill with masked values
data[i+i0,:,:] = ma.masked # mask missing raster
if lfeedback: print(' ', end=' ') # indicate missing with dot
else:
raise IOError(filepath)
# complete feedback with linebreak
if lfeedback: print('')
# reshape and check dimensions
assert i+i0 == data.shape[0]-1, (i,i0)
data = data.reshape(shape+shape2D) # now we have the full shape
gc.collect() # remove duplicate data
# return data and optional meta data
if lgeotransform or lna:
return_data = (data,)
if lgeotransform: return_data += (geotransform,)
if lna: return_data += (na,)
else:
return_data = data
return return_data
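# Hedged usage sketch (pattern, axis values and variable name are made up; 'VAR'
# is filled in through path_params as described above):
#   data, geotrans = readRasterArray('{VAR:s}_{year:04d}_{month:02d}.asc.gz',
#                                    axes=['year', 'month'], year=(2015, 2016),
#                                    month=tuple(range(1, 13)),
#                                    path_params=dict(VAR='precip'),
#                                    lgeotransform=True, lskipMissing=True)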
def readASCIIraster(filepath, lgzip=None, lgdal=True, dtype=np.float32, lmask=True, fillValue=None,
lgeotransform=True, lna=False, **kwargs):
''' load a 2D field from an ASCII raster file (can be compressed); return (masked) numpy array and geotransform '''
# handle compression (currently only gzip)
if lgzip is None: lgzip = filepath[-3:] == '.gz' # try to auto-detect
if lgdal:
# gdal imports (allow to skip if GDAL is not installed)
from osgeo import gdal
os.environ.setdefault('GDAL_DATA','/usr/local/share/gdal') # set default environment variable to prevent problems in IPython Notebooks
gdal.UseExceptions() # use exceptions (off by default)
if lgzip and not ( ramdisk and os.path.exists(ramdisk) ):
raise IOError("RAM disk '{}' not found; RAM disk is required to unzip raster files for GDAL.".format(ramdisk) +
"\nSet the RAM disk location using the RAMDISK environment variable.")
## use GDAL to read raster and parse meta data
ds = tmp = None # for graceful exit
try:
      # if file is compressed, create a temporary decompressed file
if lgzip:
with gzip.open(filepath, mode='rb') as gz, tempfile.NamedTemporaryFile(mode='wb', dir=ramdisk, delete=False) as tmp:
shutil.copyfileobj(gz, tmp)
filepath = tmp.name # full path of the temporary file (must not be deleted upon close!)
# open file as GDAL dataset and read raster band into Numpy array
ds = gdal.Open(filepath)
assert ds.RasterCount == 1, ds.RasterCount
band = ds.GetRasterBand(1)
# get some meta data
ie, je = band.XSize, band.YSize
na = band.GetNoDataValue()
if lgeotransform:
geotransform = ds.GetGeoTransform()
lflip = geotransform[5] < 0
else: lflip = True
# get data array and transform into a masked array
data = band.ReadAsArray(0, 0, ie, je).astype(dtype)
if lflip:
data = flip(data, axis=-2) # flip y-axis
if lgeotransform:
assert geotransform[4] == 0, geotransform
geotransform = geotransform[:3]+(geotransform[3]+je*geotransform[5],0,-1*geotransform[5])
if lmask:
data = ma.masked_equal(data, value=na, copy=False)
if fillValue is not None: data._fill_value = fillValue
else: data._fill_value = na
elif fillValue is not None:
data[data == na] = fillValue # replace original fill value
except Exception as e:
raise e
finally:
# clean-up
del ds # neds to be deleted, before tmp-file can be deleted - Windows is very pedantic about this...
if lgzip and tmp is not None:
os.remove(filepath) # remove temporary file
del tmp # close GDAL dataset and temporary file
else:
## parse header manually and use Numpy's genfromtxt to read array
# handle compression on the fly (no temporary file)
if lgzip: Raster = gzip.open(filepath, mode='rb')
else: Raster = open(filepath, mode='rb')
# open file
with Raster:
# read header information
headers = ('NCOLS','NROWS','XLLCORNER','YLLCORNER','CELLSIZE','NODATA_VALUE')
hdtypes = (int,int,float,float,float,dtype)
hvalues = []
# loop over items
for header,hdtype in zip(headers,hdtypes):
name, val = Raster.readline().split()
if name.upper() != header:
raise IOError("Unknown header info: '{:s}' != '{:s}'".format(name,header))
hvalues.append(hdtype(val))
ie, je, xll, yll, d, na = hvalues
# derive geotransform
if lgeotransform: geotransform = (xll, d, 0., yll, 0., d)
# read data
#print ie, je, xll, yll, d, na
# N.B.: the file cursor is already moved to the end of the header, hence skip_header=0
data = np.genfromtxt(Raster, skip_header=0, dtype=dtype, usemask=lmask,
missing_values=na, filling_values=fillValue, **kwargs)
if not data.shape == (je,ie):
raise IOError(data.shape, ie, je, xll, yll, d, na,)
# return data and optional meta data
if lgeotransform or lna:
return_data = (data,)
if lgeotransform: return_data += (geotransform,)
if lna: return_data += (na,)
else:
return_data = data
return return_data
| gpl-3.0 |
GeraldLoeffler/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/collections.py | 69 | 39876 | """
Classes for the efficient drawing of large collections of objects that
share most properties, e.g. a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g. you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g. a bunch of solid
line segments)
"""
import copy, math, warnings
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as _colors # avoid conflict with kwarg
import matplotlib.cm as cm
import matplotlib.transforms as transforms
import matplotlib.artist as artist
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
import matplotlib.mlab as mlab
class Collection(artist.Artist, cm.ScalarMappable):
"""
Base class for Collections. Must be subclassed to be usable.
All properties in a collection must be sequences or scalars;
if scalars, they will be converted to sequences. The
property of the ith element of the collection is::
prop[i % len(props)]
Keyword arguments and default values:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets).
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
(ie a call to set_array has been made), at draw time a call to
scalar mappable will be made to set the face colors.
"""
_offsets = np.array([], np.float_)
_transOffset = transforms.IdentityTransform()
_transforms = []
zorder = 1
def __init__(self,
edgecolors=None,
facecolors=None,
linewidths=None,
linestyles='solid',
antialiaseds = None,
offsets = None,
transOffset = None,
norm = None, # optional for ScalarMappable
cmap = None, # ditto
pickradius = 5.0,
urls = None,
**kwargs
):
"""
Create a Collection
%(Collection)s
"""
artist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.set_edgecolor(edgecolors)
self.set_facecolor(facecolors)
self.set_linewidth(linewidths)
self.set_linestyle(linestyles)
self.set_antialiased(antialiaseds)
self.set_urls(urls)
self._uniform_offsets = None
self._offsets = np.array([], np.float_)
if offsets is not None:
offsets = np.asarray(offsets)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
if transOffset is not None:
self._offsets = offsets
self._transOffset = transOffset
else:
self._uniform_offsets = offsets
self._pickradius = pickradius
self.update(kwargs)
def _get_value(self, val):
try: return (float(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: float(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a float or nonzero sequence of floats')
def _get_bool(self, val):
try: return (bool(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: bool(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a bool or nonzero sequence of them')
def get_paths(self):
raise NotImplementedError
def get_transforms(self):
return self._transforms
def get_datalim(self, transData):
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
offsets = np.asarray(offsets, np.float_)
result = mpath.get_path_collection_extents(
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset.frozen())
result = result.inverse_transformed(transData)
return result
def get_window_extent(self, renderer):
bbox = self.get_datalim(transforms.IdentityTransform())
#TODO:check to ensure that this does not fail for
#cases other than scatter plot legend
return bbox
def _prepare_points(self):
"""Point prep for drawing and hit testing"""
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if self.have_units():
paths = []
for path in self.get_paths():
vertices = path.vertices
xs, ys = vertices[:, 0], vertices[:, 1]
xs = self.convert_xunits(xs)
ys = self.convert_yunits(ys)
paths.append(mpath.Path(zip(xs, ys), path.codes))
if len(self._offsets):
xs = self.convert_xunits(self._offsets[:, 0])
ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if not transform.is_affine:
paths = [transform.transform_path_non_affine(path) for path in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
return transform, transOffset, offsets, paths
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
transform, transOffset, offsets, paths = self._prepare_points()
renderer.draw_path_collection(
transform.frozen(), self.clipbox, clippath, clippath_trans,
paths, self.get_transforms(),
offsets, transOffset,
self.get_facecolor(), self.get_edgecolor(), self._linewidths,
self._linestyles, self._antialiaseds, self._urls)
renderer.close_group(self.__class__.__name__)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the collection.
Returns True | False, ``dict(ind=itemlist)``, where every
item in itemlist contains the event.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible(): return False,{}
transform, transOffset, offsets, paths = self._prepare_points()
ind = mpath.point_in_path_collection(
mouseevent.x, mouseevent.y, self._pickradius,
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset, len(self._facecolors)>0)
return len(ind)>0,dict(ind=ind)
def set_pickradius(self, pickradius): self._pickradius = pickradius
def get_pickradius(self): return self._pickradius
def set_urls(self, urls):
if urls is None:
self._urls = [None,]
else:
self._urls = urls
def get_urls(self): return self._urls
def set_offsets(self, offsets):
"""
Set the offsets for the collection. *offsets* can be a scalar
or a sequence.
ACCEPTS: float or sequence of floats
"""
offsets = np.asarray(offsets, np.float_)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
#This decision is based on how they are initialized above
if self._uniform_offsets is None:
self._offsets = offsets
else:
self._uniform_offsets = offsets
def get_offsets(self):
"""
Return the offsets for the collection.
"""
#This decision is based on how they are initialized above in __init__()
if self._uniform_offsets is None:
return self._offsets
else:
return self._uniform_offsets
def set_linewidth(self, lw):
"""
Set the linewidth(s) for the collection. *lw* can be a scalar
or a sequence; if it is a sequence the patches will cycle
through the sequence
ACCEPTS: float or sequence of floats
"""
if lw is None: lw = mpl.rcParams['patch.linewidth']
self._linewidths = self._get_value(lw)
def set_linewidths(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the linestyle(s) for the collection.
ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted' |
(offset, on-off-dash-seq) ]
"""
try:
dashd = backend_bases.GraphicsContextBase.dashd
if cbook.is_string_like(ls):
if ls in dashd:
dashes = [dashd[ls]]
elif ls in cbook.ls_mapper:
dashes = [dashd[cbook.ls_mapper[ls]]]
else:
raise ValueError()
elif cbook.iterable(ls):
try:
dashes = []
for x in ls:
if cbook.is_string_like(x):
if x in dashd:
dashes.append(dashd[x])
elif x in cbook.ls_mapper:
dashes.append(dashd[cbook.ls_mapper[x]])
else:
raise ValueError()
elif cbook.iterable(x) and len(x) == 2:
dashes.append(x)
else:
raise ValueError()
except ValueError:
if len(ls)==2:
dashes = ls
else:
raise ValueError()
else:
raise ValueError()
except ValueError:
raise ValueError('Do not know how to convert %s to dashes'%ls)
self._linestyles = dashes
def set_linestyles(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_dashes(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_antialiased(self, aa):
"""
Set the antialiasing state for rendering.
ACCEPTS: Boolean or sequence of booleans
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiaseds = self._get_bool(aa)
def set_antialiaseds(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_facecolor(self, c):
"""
Set the facecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c is None: c = mpl.rcParams['patch.facecolor']
self._facecolors_original = c
self._facecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_facecolors(self, c):
"""alias for set_facecolor"""
return self.set_facecolor(c)
def get_facecolor(self):
return self._facecolors
get_facecolors = get_facecolor
def get_edgecolor(self):
if self._edgecolors == 'face':
return self.get_facecolors()
else:
return self._edgecolors
get_edgecolors = get_edgecolor
def set_edgecolor(self, c):
"""
Set the edgecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'face', the edge color will always be the same as
the face color.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c == 'face':
self._edgecolors = 'face'
self._edgecolors_original = 'face'
else:
if c is None: c = mpl.rcParams['patch.edgecolor']
self._edgecolors_original = c
self._edgecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_edgecolors(self, c):
"""alias for set_edgecolor"""
return self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
Set the alpha transparencies of the collection. *alpha* must be
a float.
ACCEPTS: float
"""
try: float(alpha)
except TypeError: raise TypeError('alpha must be a float')
else:
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = _colors.colorConverter.to_rgba_array(
self._facecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
if self._edgecolors_original != 'face':
self._edgecolors = _colors.colorConverter.to_rgba_array(
self._edgecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
def get_linewidths(self):
return self._linewidths
get_linewidth = get_linewidths
def get_linestyles(self):
return self._linestyles
get_dashes = get_linestyle = get_linestyles
def update_scalarmappable(self):
"""
If the scalar mappable array is not none, update colors
from scalar data
"""
if self._A is None: return
if self._A.ndim > 1:
raise ValueError('Collections can only map rank 1 arrays')
if len(self._facecolors):
self._facecolors = self.to_rgba(self._A, self._alpha)
else:
self._edgecolors = self.to_rgba(self._A, self._alpha)
def update_from(self, other):
'copy properties from other to self'
artist.Artist.update_from(self, other)
self._antialiaseds = other._antialiaseds
self._edgecolors_original = other._edgecolors_original
self._edgecolors = other._edgecolors
self._facecolors_original = other._facecolors_original
self._facecolors = other._facecolors
self._linewidths = other._linewidths
self._linestyles = other._linestyles
self._pickradius = other._pickradius
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Collection'] = """\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
"""
class QuadMesh(Collection):
"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh consists of a grid of vertices. The
dimensions of this array are (*meshWidth* + 1, *meshHeight* +
1). Each vertex in the mesh has a different set of "mesh
coordinates" representing its position in the topology of the
mesh. For any values (*m*, *n*) such that 0 <= *m* < *meshWidth*
and 0 <= *n* < *meshHeight*, the vertices at mesh coordinates
(*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
form one of the quadrilaterals in the mesh. There are thus
(*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
need not be regular and the polygons need not be convex.
A quadrilateral mesh is represented by a (((*meshWidth* + 1) *
(*meshHeight* + 1)) x 2) numpy array *coordinates*, where each row is
the *x* and *y* coordinates of one of the vertices. To define the
function that maps from a data point to its corresponding color,
use the :meth:`set_cmap` method. Each of these arrays is indexed in
row-major order by the mesh coordinates of the vertex (or the mesh
coordinates of the lower left vertex, in the case of the
colors).
For example, the first entry in *coordinates* is the
coordinates of the vertex at mesh coordinates (0, 0), then the one
at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
so on.
"""
def __init__(self, meshWidth, meshHeight, coordinates, showedges, antialiased=True):
Collection.__init__(self)
self._meshWidth = meshWidth
self._meshHeight = meshHeight
self._coordinates = coordinates
self._showedges = showedges
self._antialiased = antialiased
self._paths = None
self._bbox = transforms.Bbox.unit()
self._bbox.update_from_data_xy(coordinates.reshape(
((meshWidth + 1) * (meshHeight + 1), 2)))
# By converting to floats now, we can avoid that on every draw.
self._coordinates = self._coordinates.reshape((meshHeight + 1, meshWidth + 1, 2))
self._coordinates = np.array(self._coordinates, np.float_)
def get_paths(self, dataTrans=None):
if self._paths is None:
self._paths = self.convert_mesh_to_paths(
self._meshWidth, self._meshHeight, self._coordinates)
return self._paths
#@staticmethod
def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support quadmeshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
if ma.isMaskedArray(coordinates):
c = coordinates.data
else:
c = coordinates
points = np.concatenate((
c[0:-1, 0:-1],
c[0:-1, 1: ],
c[1: , 1: ],
c[1: , 0:-1],
c[0:-1, 0:-1]
), axis=2)
points = points.reshape((meshWidth * meshHeight, 5, 2))
return [Path(x) for x in points]
convert_mesh_to_paths = staticmethod(convert_mesh_to_paths)
def get_datalim(self, transData):
return self._bbox
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
if self.have_units():
if len(self._offsets):
xs = self.convert_xunits(self._offsets[:, 0])
ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if self.check_update('array'):
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
if not transform.is_affine:
coordinates = self._coordinates.reshape(
(self._coordinates.shape[0] *
self._coordinates.shape[1],
2))
coordinates = transform.transform(coordinates)
coordinates = coordinates.reshape(self._coordinates.shape)
transform = transforms.IdentityTransform()
else:
coordinates = self._coordinates
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
renderer.draw_quad_mesh(
transform.frozen(), self.clipbox, clippath, clippath_trans,
self._meshWidth, self._meshHeight, coordinates,
offsets, transOffset, self.get_facecolor(), self._antialiased,
self._showedges)
renderer.close_group(self.__class__.__name__)
class PolyCollection(Collection):
def __init__(self, verts, sizes = None, closed = True, **kwargs):
"""
*verts* is a sequence of ( *verts0*, *verts1*, ...) where
*verts_i* is a sequence of *xy* tuples of vertices, or an
equivalent :mod:`numpy` array of shape (*nv*, 2).
*sizes* is *None* (default) or a sequence of floats that
scale the corresponding *verts_i*. The scaling is applied
before the Artist master transform; if the latter is an identity
transform, then the overall scaling is such that if
*verts_i* specify a unit square, then *sizes_i* is the area
of that square in points^2.
If len(*sizes*) < *nv*, the additional values will be
taken cyclically from the array.
*closed*, when *True*, will explicitly close the polygon.
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_verts(verts, closed)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def set_verts(self, verts, closed=True):
'''This allows one to delay initialization of the vertices.'''
if closed:
self._paths = []
for xy in verts:
if np.ma.isMaskedArray(xy):
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.ma.concatenate([xy, [xy[0]]])
else:
xy = np.asarray(xy)
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
self._paths.append(mpath.Path(xy))
else:
self._paths = [mpath.Path(xy) for xy in verts]
def get_paths(self):
return self._paths
def draw(self, renderer):
if self._sizes is not None:
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0))
for x in self._sizes]
return Collection.draw(self, renderer)
class BrokenBarHCollection(PolyCollection):
"""
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
"""
def __init__(self, xranges, yrange, **kwargs):
"""
*xranges*
sequence of (*xmin*, *xwidth*)
*yrange*
*ymin*, *ywidth*
%(Collection)s
"""
ymin, ywidth = yrange
ymax = ymin + ywidth
verts = [ [(xmin, ymin), (xmin, ymax), (xmin+xwidth, ymax), (xmin+xwidth, ymin), (xmin, ymin)] for xmin, xwidth in xranges]
PolyCollection.__init__(self, verts, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
@staticmethod
def span_where(x, ymin, ymax, where, **kwargs):
"""
Create a BrokenBarHCollection to plot horizontal bars from
over the regions in *x* where *where* is True. The bars range
on the y-axis from *ymin* to *ymax*
A :class:`BrokenBarHCollection` is returned.
*kwargs* are passed on to the collection
"""
xranges = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
if not len(xslice):
continue
xranges.append((xslice[0], xslice[-1]-xslice[0]))
collection = BrokenBarHCollection(xranges, [ymin, ymax-ymin], **kwargs)
return collection
class RegularPolyCollection(Collection):
"""Draw a collection of regular polygons with *numsides*."""
_path_generator = mpath.Path.unit_regular_polygon
def __init__(self,
numsides,
rotation = 0 ,
sizes = (1,),
**kwargs):
"""
*numsides*
the number of sides of the polygon
*rotation*
the rotation of the polygon in radians
*sizes*
gives the area of the circle circumscribing the
regular polygon in points^2
%(Collection)s
Example: see :file:`examples/dynamic_collection.py` for
complete example::
offsets = np.random.rand(20,2)
facecolors = [cm.jet(x) for x in np.random.rand(20)]
black = (0,0,0,1)
collection = RegularPolyCollection(
numsides=5, # a pentagon
rotation=0, sizes=(50,),
facecolors = facecolors,
edgecolors = (black,),
linewidths = (1,),
offsets = offsets,
transOffset = ax.transData,
)
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self._numsides = numsides
self._paths = [self._path_generator(numsides)]
self._rotation = rotation
self.set_transform(transforms.IdentityTransform())
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
self._transforms = [
transforms.Affine2D().rotate(-self._rotation).scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
def get_numsides(self):
return self._numsides
def get_rotation(self):
return self._rotation
def get_sizes(self):
return self._sizes
class StarPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular stars with *numsides* points."""
_path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular asterisks with *numsides* points."""
_path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
"""
All parameters must be sequences or scalars; if scalars, they will
be converted to sequences. The property of the ith line
segment is::
prop[i % len(props)]
i.e., the properties cycle if the ``len`` of props is less than the
number of segments.
"""
zorder = 2
def __init__(self, segments, # Can be None.
linewidths = None,
colors = None,
antialiaseds = None,
linestyles = 'solid',
offsets = None,
transOffset = None,
norm = None,
cmap = None,
pickradius = 5,
**kwargs
):
"""
*segments*
a sequence of (*line0*, *line1*, *line2*), where::
linen = (x0, y0), (x1, y1), ... (xm, ym)
or the equivalent numpy array with two columns. Each line
can be a different length.
*colors*
must be a sequence of RGBA tuples (e.g. arbitrary color
strings, etc., are not allowed).
*antialiaseds*
must be a sequence of ones or zeros
*linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
a string or dash tuple. The dash tuple is::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
If *linewidths*, *colors*, or *antialiaseds* is None, they
default to their rcParams setting, in sequence form.
If *offsets* and *transOffset* are not None, then
*offsets* are transformed by *transOffset* and applied after
the segments have been transformed to display coordinates.
If *offsets* is not None but *transOffset* is None, then the
*offsets* are added to the segments before any transformation.
In this case, a single offset can be specified as::
offsets=(xo,yo)
and this value will be added cumulatively to each successive
segment, so as to produce a set of successively offset curves.
*norm*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*cmap*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*pickradius* is the tolerance for mouse clicks picking a line.
The default is 5 pt.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix
:attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to
:meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
draw time a call to scalar mappable will be made to set the colors.
"""
if colors is None: colors = mpl.rcParams['lines.color']
if linewidths is None: linewidths = (mpl.rcParams['lines.linewidth'],)
if antialiaseds is None: antialiaseds = (mpl.rcParams['lines.antialiased'],)
self.set_linestyles(linestyles)
colors = _colors.colorConverter.to_rgba_array(colors)
Collection.__init__(
self,
edgecolors=colors,
linewidths=linewidths,
linestyles=linestyles,
antialiaseds=antialiaseds,
offsets=offsets,
transOffset=transOffset,
norm=norm,
cmap=cmap,
pickradius=pickradius,
**kwargs)
self.set_facecolors([])
self.set_segments(segments)
def get_paths(self):
return self._paths
def set_segments(self, segments):
if segments is None: return
_segments = []
for seg in segments:
if not np.ma.isMaskedArray(seg):
seg = np.asarray(seg, np.float_)
_segments.append(seg)
if self._uniform_offsets is not None:
_segments = self._add_offsets(_segments)
self._paths = [mpath.Path(seg) for seg in _segments]
set_verts = set_segments # for compatibility with PolyCollection
def _add_offsets(self, segs):
offsets = self._uniform_offsets
Nsegs = len(segs)
Noffs = offsets.shape[0]
if Noffs == 1:
for i in range(Nsegs):
segs[i] = segs[i] + i * offsets
else:
for i in range(Nsegs):
io = i%Noffs
segs[i] = segs[i] + offsets[io:io+1]
return segs
def set_color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self._edgecolors = _colors.colorConverter.to_rgba_array(c)
def color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
warnings.warn('LineCollection.color deprecated; use set_color instead')
return self.set_color(c)
def get_color(self):
return self._edgecolors
get_colors = get_color # for compatibility with old versions
class CircleCollection(Collection):
"""
A collection of circles, drawn using splines.
"""
def __init__(self, sizes, **kwargs):
"""
*sizes*
Gives the area of the circle in points^2
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_transform(transforms.IdentityTransform())
self._paths = [mpath.Path.unit_circle()]
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
# sizes is the area of the circle circumscribing the polygon
# in points^2
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class EllipseCollection(Collection):
"""
A collection of ellipses, drawn using splines.
"""
def __init__(self, widths, heights, angles, units='points', **kwargs):
"""
*widths*: sequence
half-lengths of first axes (e.g., semi-major axis lengths)
*heights*: sequence
half-lengths of second axes
*angles*: sequence
angles of first axes, degrees CCW from the X-axis
*units*: ['points' | 'inches' | 'dots' | 'width' | 'height' | 'x' | 'y']
units in which majors and minors are given; 'width' and 'height'
refer to the dimensions of the axes, while 'x' and 'y'
refer to the *offsets* data units.
Additional kwargs inherited from the base :class:`Collection`:
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._widths = np.asarray(widths).ravel()
self._heights = np.asarray(heights).ravel()
self._angles = np.asarray(angles).ravel() *(np.pi/180.0)
self._units = units
self.set_transform(transforms.IdentityTransform())
self._transforms = []
self._paths = [mpath.Path.unit_circle()]
self._initialized = False
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _init(self):
def on_dpi_change(fig):
self._transforms = []
self.figure.callbacks.connect('dpi_changed', on_dpi_change)
self._initialized = True
def set_transforms(self):
if not self._initialized:
self._init()
self._transforms = []
ax = self.axes
fig = self.figure
if self._units in ('x', 'y'):
if self._units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
sc = dx1/dx0
else:
if self._units == 'inches':
sc = fig.dpi
elif self._units == 'points':
sc = fig.dpi / 72.0
elif self._units == 'width':
sc = ax.bbox.width
elif self._units == 'height':
sc = ax.bbox.height
elif self._units == 'dots':
sc = 1.0
else:
raise ValueError('unrecognized units: %s' % self._units)
_affine = transforms.Affine2D
for x, y, a in zip(self._widths, self._heights, self._angles):
trans = _affine().scale(x * sc, y * sc).rotate(a)
self._transforms.append(trans)
def draw(self, renderer):
if True: ###not self._transforms:
self.set_transforms()
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class PatchCollection(Collection):
"""
A generic collection of patches.
This makes it easier to assign a color map to a heterogeneous
collection of patches.
This also may improve plotting speed, since PatchCollection will
draw faster than a large number of patches.
"""
def __init__(self, patches, match_original=False, **kwargs):
"""
*patches*
a sequence of Patch objects. This list may include
a heterogeneous assortment of different patch types.
*match_original*
If True, use the colors and linewidths of the original
patches. If False, new colors may be assigned by
providing the standard collection arguments, facecolor,
edgecolor, linewidths, norm or cmap.
If any of *edgecolors*, *facecolors*, *linewidths*,
*antialiaseds* are None, they default to their
:data:`matplotlib.rcParams` patch setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
None (ie a call to set_array has been made), at draw time a
call to scalar mappable will be made to set the face colors.
"""
if match_original:
def determine_facecolor(patch):
if patch.fill:
return patch.get_facecolor()
return [0, 0, 0, 0]
facecolors = [determine_facecolor(p) for p in patches]
edgecolors = [p.get_edgecolor() for p in patches]
linewidths = [p.get_linewidths() for p in patches]
antialiaseds = [p.get_antialiased() for p in patches]
Collection.__init__(
self,
edgecolors=edgecolors,
facecolors=facecolors,
linewidths=linewidths,
linestyles='solid',
antialiaseds = antialiaseds)
else:
Collection.__init__(self, **kwargs)
paths = [p.get_transform().transform_path(p.get_path())
for p in patches]
self._paths = paths
def get_paths(self):
return self._paths
artist.kwdocd['Collection'] = patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'PolyCollection', 'BrokenBarHCollection', 'RegularPolyCollection',
'StarPolygonCollection', 'PatchCollection', 'CircleCollection'):
artist.kwdocd[k] = patchstr
artist.kwdocd['LineCollection'] = artist.kwdoc(LineCollection)
| agpl-3.0 |
YinongLong/scikit-learn | sklearn/linear_model/__init__.py | 83 | 3139 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .huber import HuberRegressor
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
appapantula/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
saketkc/bio-tricks | meme_parser/meme_processory.py | 1 | 2759 | #!/usr/bin/env python
"""
Process meme.txt files to
generate conservation plots
"""
import argparse
import csv
import sys
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
from Bio import motifs
def plot_meme_against_phylo(meme_record, phylo):
sns.set(style="darkgrid")
def position_wise_profile(counts_dict, length):
profile = map(dict, zip(*[[(k, v) for v in value] for k, value in counts_dict.items()]))
return profile
def find_max_occurence(profile, max_count=2):
sorted_profile = []
for p in profile:
sorted_profile.append(sorted(p.items(), key=lambda x:x[1]))
for i,p in enumerate(sorted_profile):
sorted_profile[i] = p[-max_count:]
return sorted_profile
def main(argv):
parser = argparse.ArgumentParser(description='Process meme files')
parser.add_argument('-i', '--meme', metavar='<meme_out>', help='Meme input file', required=True)
parser.add_argument('-m', '--motif', metavar='<motif_no>', help='Motif number', required=True, type=int)
parser.add_argument('-c', '--phylo', metavar='<phylo_out>', help='PhyloP conservation scores', required=True)
parsed = parser.parse_args(argv)
handle = open(parsed.meme)
records = motifs.parse(handle, 'meme')
record = records[parsed.motif-1]
phylo_data = csv.reader(open(parsed.phylo,'r'), delimiter='\t')
phylo_scores = []
for line in phylo_data:
phylo_scores.append(float(line[2]))
print "Motif length", record.length
print "phylo length", len(phylo_scores)
profile = position_wise_profile(record.counts, record.length)
max_occur = find_max_occurence(profile, max_count=1)
motif_scores = []
for position in max_occur:
motif_scores.append(position[0][1])
pr = pearsonr(np.array(motif_scores), np.array(phylo_scores))
print 'Pearson correlation: {}'.format(pr)
fig, ax = plt.subplots()
ax= sns.regplot(y=np.array(motif_scores), x=np.array(phylo_scores), scatter=True)
ax.set(ylabel="Count of most freq nucleotide", xlabel="PhyloP scores", title='CTCF | pearsonr = {}, p-val={}'.format(pr[0],pr[1]));
fig.savefig('{}_motif{}_scatter.png'.format(parsed.phylo, parsed.motif))
x = np.linspace(1,len(phylo_scores)+1,num=len(phylo_scores), endpoint=False)
f, (ax1, ax2) = plt.subplots(2, 1)
x1 = sns.barplot(x,y=np.array(motif_scores), ax=ax1)
x2 = sns.barplot(x,y=np.array(phylo_scores), ax=ax2)
x1.set(ylabel='Counts of most freq nucleotide', xlabel='Position in motif')
x2.set(ylabel='Phylop Score', xlabel='Position in motif')
f.tight_layout()
f.savefig('{}_motif{}_trend.png'.format(parsed.phylo, parsed.motif))
if __name__ == "__main__":
main(sys.argv[1:])
| mit |
rosenbrockc/live-serial | setup.py | 1 | 1733 | #!/usr/bin/env python
try:
from setuptools import setup
args = {}
except ImportError:
from distutils.core import setup
print("""\
*** WARNING: setuptools is not found. Using distutils...
""")
from setuptools import setup
try:
from pypandoc import convert
read_md = lambda f: convert(f, 'rst')
except ImportError:
print("warning: pypandoc module not found, could not convert Markdown to RST")
read_md = lambda f: open(f, 'r').read()
from os import path
setup(name='live-serial',
version='0.1.7',
description='Real-time serial port plotter/logger.',
long_description= "" if not path.isfile("README.md") else read_md('README.md'),
author='Conrad W Rosenbrock',
author_email='[email protected]',
url='https://github.com/rosenbrockc/live-serial',
license='MIT',
setup_requires=['pytest-runner',],
tests_require=['pytest', 'python-coveralls'],
install_requires=[
"argparse",
"termcolor",
"numpy",
"matplotlib",
"pyserial",
],
packages=['liveserial'],
scripts=['liveserial/livemon.py'],
package_data={'liveserial': []},
include_package_data=True,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
)
| mit |
CNS-OIST/PyPe9 | pype9/cmd/plot.py | 2 | 1904 | """
Simple tool for plotting the output of PyPe9 simulations using Matplotlib_.
Since Pype9 output is stored in Neo_ format, it can be used to plot generic
Neo_ files but it also includes handling of Pype9-specific annotations, such as
regime transitions.
"""
from argparse import ArgumentParser
from pype9.utils.arguments import existing_file
from pype9.utils.logging import logger # @UnusedImport
def argparser():
parser = ArgumentParser(prog='pype9 plot',
description=__doc__)
parser.add_argument('filename', type=existing_file,
help="Neo file outputted from a PyPe9 simulation")
parser.add_argument('--save', type=str, default=None,
help="Location to save the figure to")
parser.add_argument('--dims', type=int, nargs=2, default=(10, 8),
metavar=('WIDTH', 'HEIGHT'),
help="Dimensions of the plot")
parser.add_argument('--hide', action='store_true', default=False,
help="Whether to show the plot or not")
parser.add_argument('--resolution', type=float, default=300.0,
help="Resolution of the figure when it is saved")
return parser
def run(argv):
import neo
from pype9.exceptions import Pype9UsageError
args = argparser().parse_args(argv)
if args.hide:
import matplotlib # @IgnorePep8
matplotlib.use('Agg') # Set to use Agg so DISPLAY is not required
from pype9.plot import plot # @IgnorePep8
segments = neo.PickleIO(args.filename).read()
if len(segments) > 1:
raise Pype9UsageError(
"Expected only a single recording segment in file '{}', found {}."
.format(args.filename, len(segments)))
seg = segments[0]
plot(seg, dims=args.dims, show=not args.hide, resolution=args.resolution,
save=args.save)
| mit |
paninski-lab/yass | src/yass/visual/run.py | 1 | 76105 | import numpy as np
import scipy
from scipy.io import loadmat
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.patches as mpatches
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
#from mpl_toolkits.axes_grid1.colorbar import colorbar
import yaml
from tqdm import tqdm
import parmap
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import torch
from yass.correlograms_phy import compute_correlogram
from yass.merge.notch import notch_finder
from yass.visual.util import *
from yass.geometry import parse, find_channel_neighbors
from yass.template import align_get_shifts_with_ref, shift_chans
from yass.merge.merge import (template_dist_linear_align,
template_spike_dist_linear_align,
test_unimodality)
from yass.util import absolute_path_to_asset
from yass import read_config
from yass.reader import READER
def run():
"""Visualization Package
"""
CONFIG = read_config()
fname_templates = os.path.join(CONFIG.path_to_output_directory,
'templates.npy')
fname_spike_train = os.path.join(CONFIG.path_to_output_directory,
'spike_train.npy')
rf_dir = os.path.join(CONFIG.path_to_output_directory, 'rf')
fname_recording = os.path.join(CONFIG.path_to_output_directory,
'preprocess',
'standardized.bin')
fname_recording_yaml = os.path.join(CONFIG.path_to_output_directory,
'preprocess',
'standardized.yaml')
with open(fname_recording_yaml, 'r') as stream:
data_loaded = yaml.load(stream)
recording_dtype = data_loaded['dtype']
fname_geometry = os.path.join(CONFIG.data.root_folder, CONFIG.data.geometry)
sampling_rate = CONFIG.recordings.sampling_rate
save_dir = os.path.join(CONFIG.path_to_output_directory, 'visualize')
# only for yass
template_space_dir = absolute_path_to_asset('template_space')
deconv_dir = os.path.join(CONFIG.path_to_output_directory,
'deconv', 'final')
vis = Visualizer(fname_templates, fname_spike_train,
fname_recording, recording_dtype,
fname_geometry, sampling_rate, save_dir,
rf_dir, template_space_dir,
deconv_dir)
vis.population_level_plot()
vis.individiual_cell_plot()
class Visualizer(object):
def __init__(self, fname_templates, fname_spike_train,
fname_recording, recording_dtype,
CONFIG, save_dir, rf_dir=None,
fname_residual=None, residual_dtype=None,
fname_soft_assignment=None):
# saving directory location
self.save_dir = save_dir
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
self.tmp_folder = os.path.join(self.save_dir, 'tmp')
if not os.path.exists(self.tmp_folder):
os.makedirs(self.tmp_folder)
# necessary numbers
self.n_neighbours = 3
self.sampling_rate = CONFIG.recordings.sampling_rate
self.geom = CONFIG.geom
self.neigh_channels = CONFIG.neigh_channels
# load templates
self.templates = np.load(fname_templates)
if len(self.geom) == self.templates.shape[2]:
self.templates = self.templates.transpose(1, 2, 0)
self.n_times_templates, self.n_channels, self.n_units = self.templates.shape
# compute neighbors for each unit
self.compute_neighbours()
self.compute_propagation()
# load spike train and templates
self.spike_train = np.load(fname_spike_train)
self.unique_ids = np.unique(self.spike_train[:,1])
if fname_soft_assignment is not None:
self.soft_assignment = np.load(fname_soft_assignment)
else:
self.soft_assignment = np.ones(self.spike_train.shape[0], 'float32')
# compute firing rates
self.compute_firing_rates()
self.compute_xcorrs()
# recording readers
self.reader = READER(fname_recording, recording_dtype, CONFIG, 1)
# change spike size just in case
self.reader.spike_size = self.n_times_templates
if fname_residual is not None:
self.reader_resid = READER(fname_residual, residual_dtype, CONFIG, 1)
self.reader_resid.spike_size = self.n_times_templates
else:
self.reader_resid = None
# rf files
self.rf_dir = rf_dir
if rf_dir is not None:
self.STAs = np.load(os.path.join(rf_dir, 'STA_spatial.npy'))[:, :, :, 1]
self.STAs = np.flip(self.STAs, axis=2)
self.STAs_temporal = np.load(os.path.join(rf_dir, 'STA_temporal.npy'))
self.gaussian_fits = np.load(os.path.join(rf_dir, 'gaussian_fits.npy'))
self.cell_types = list(np.load(os.path.join(rf_dir, 'cell_types.npy')))
self.cell_types += ['No-Rf', 'Multiple-Rf']
if os.path.exists(os.path.join(rf_dir, 'labels_updated.npy')):
self.rf_labels = np.load(os.path.join(rf_dir, 'labels_updated.npy'))
else:
self.idx_single_rf = np.load(os.path.join(rf_dir, 'idx_single_rf.npy'))
self.idx_no_rf = np.load(os.path.join(rf_dir, 'idx_no_rf.npy'))
self.idx_multi_rf = np.load(os.path.join(rf_dir, 'idx_multi_rf.npy'))
self.rf_labels = np.load(os.path.join(rf_dir, 'labels.npy'))
self.rf_labels[self.idx_multi_rf] = len(self.cell_types) - 1
self.rf_labels[self.idx_no_rf] = len(self.cell_types) - 2
self.stim_size = np.load(os.path.join(rf_dir, 'stim_size.npy'))
max_t = np.argmax(np.abs(self.STAs_temporal[:,:,1]), axis=1)
self.sta_sign = np.sign(self.STAs_temporal[
np.arange(self.STAs_temporal.shape[0]),max_t, 1])
# also compute rf
self.compute_neighbours_rf()
# get colors
self.colors = colors = [
'black','blue','red','green','cyan','magenta','brown','pink',
'orange','firebrick','lawngreen','dodgerblue','crimson','orchid','slateblue',
'darkgreen','darkorange','indianred','darkviolet','deepskyblue','greenyellow',
'peru','cadetblue','forestgreen','slategrey','lightsteelblue','rebeccapurple',
'darkmagenta','yellow','hotpink']
self.cmap = cm = plt.cm.get_cmap('RdYlBu')
def compute_propagation(self):
ptps = self.templates.ptp(0)
shifts = np.zeros((self.n_units, self.n_channels))
for k in range(self.n_units):
temp = self.templates[:, :, k]
mc = temp.ptp(0).argmax()
arg_min = temp.argmin(0)
arg_min -= arg_min[mc]
vis_chan = np.where(temp.ptp(0) > 0.5)[0]
shifts[k][vis_chan] = arg_min[vis_chan]
#shifts[shifts < -5] = -5
self.shifts = shifts
self.max_shift = np.max(self.shifts)
self.min_shift = np.min(self.shifts)
self.min_shift = -10
def compute_firing_rates(self):
# COMPUTE FIRING RATES
n_chans = self.n_channels
samplerate = self.sampling_rate
self.rec_len = np.ptp(self.spike_train[:, 0])/samplerate
n_spikes_soft = np.zeros(self.n_units)
for j in range(self.spike_train.shape[0]):
n_spikes_soft[self.spike_train[j, 1]] += self.soft_assignment[j]
n_spikes_soft = n_spikes_soft.astype('int32')
self.f_rates = n_spikes_soft/self.rec_len
self.ptps = self.templates.ptp(0).max(0)
def compute_xcorrs(self):
self.window_size = 0.04
self.bin_width = 0.001
fname = os.path.join(self.tmp_folder, 'xcorrs.npy')
if os.path.exists(fname):
self.xcorrs = np.load(fname)
else:
self.xcorrs = compute_correlogram(
np.arange(self.n_units),
self.spike_train,
self.soft_assignment,
sample_rate=self.sampling_rate,
bin_width=self.bin_width,
window_size=self.window_size)
np.save(fname, self.xcorrs)
avg_frates = (self.f_rates[self.unique_ids][:, None]+self.f_rates[self.unique_ids][None])/2
self.xcorrs = self.xcorrs/avg_frates[:,:,None]/self.rec_len/self.bin_width
def compute_neighbours(self):
fname = os.path.join(self.tmp_folder, 'neighbours.npy')
if os.path.exists(fname):
self.nearest_units = np.load(fname)
else:
dist = template_dist_linear_align(self.templates.transpose(2,0,1))
nearest_units = []
for k in range(dist.shape[0]):
idx = np.argsort(dist[k])[1:self.n_neighbours+1]
nearest_units.append(idx)
self.nearest_units = np.array(nearest_units)
np.save(fname, self.nearest_units)
def compute_neighbours_rf(self):
std = np.median(np.abs(
self.STAs - np.median(self.STAs)))/0.6745
self.rf_std = std
fname = os.path.join(self.tmp_folder, 'neighbours_rf.npy')
if os.path.exists(fname):
self.nearest_units_rf = np.load(fname)
else:
th = std*0.5
STAs_th = np.copy(self.STAs)
STAs_th[np.abs(STAs_th) < th] = 0
STAs_th = STAs_th.reshape(self.n_units, -1)
STAs_th = STAs_th#*self.sta_sign[:, None]
norms = np.linalg.norm(STAs_th.T, axis=0)[:, np.newaxis]
cos = np.matmul(STAs_th, STAs_th.T)/np.matmul(norms, norms.T)
cos[np.isnan(cos)] = 0
nearest_units_rf = np.zeros((self.n_units, self.n_neighbours), 'int32')
for j in np.unique(self.rf_labels):
units_same_class = np.where(self.rf_labels == j)[0]
if len(units_same_class) > self.n_neighbours+1:
for k in units_same_class:
idx_ = np.argsort(cos[k][units_same_class])[::-1][1:self.n_neighbours+1]
nearest_units_rf[k] = units_same_class[idx_]
else:
for k in units_same_class:
other_units = units_same_class[units_same_class != k]
nearest_units_rf[k][:len(other_units)] = other_units
nearest_units_rf[k][len(other_units):] = k
self.nearest_units_rf = nearest_units_rf
np.save(fname, self.nearest_units_rf)
def compute_neighbours_xcorrs(self, unit):
xcorrs = self.xcorrs[np.where(
self.unique_ids == unit)[0][0]]
xcorrs = xcorrs[self.unique_ids != unit]
idx_others = self.unique_ids[self.unique_ids != unit]
sig_xcorrs = np.where(xcorrs.sum(1) > 10/(self.rec_len*self.bin_width*self.f_rates[unit]))[0]
xcorrs = xcorrs[sig_xcorrs]
idx_others = idx_others[sig_xcorrs]
means_ = xcorrs.mean(1)
stds_ = np.std(xcorrs, 1)
stds_[stds_==0] = 1
xcorrs = (xcorrs - means_[:,None])/stds_[:,None]
idx_max = np.argsort(xcorrs.max(1))[::-1][:self.n_neighbours]
max_vals = xcorrs.max(1)[idx_max]
idx_max = idx_others[idx_max]
idx_min = np.argsort(xcorrs.min(1))[:self.n_neighbours]
min_vals = xcorrs.min(1)[idx_min]
idx_min = idx_others[idx_min]
return idx_max, max_vals, idx_min, min_vals
def population_level_plot(self):
self.fontsize = 20
self.make_raster_plot()
self.make_firing_rate_plot()
self.make_normalized_templates_plot()
if self.rf_dir is not None:
#self.make_rf_plots()
self.cell_classification_plots()
else:
self.make_all_templates_summary_plots()
if self.reader_resid is not None:
#self.residual_varaince()
self.add_residual_qq_plot()
self.add_raw_resid_snippets()
def individiual_level_plot(self, units_full_analysis=None, sample=False,
plot_all=True, plot_summary=True, divide_by_cell_types=True):
# saving directory location
self.save_dir_ind = os.path.join(
self.save_dir, 'individual')
if not os.path.exists(self.save_dir_ind):
os.makedirs(self.save_dir_ind)
if divide_by_cell_types:
for cell_type in self.cell_types:
dir_tmp = os.path.join(
self.save_dir_ind, cell_type)
if not os.path.exists(dir_tmp):
os.makedirs(dir_tmp)
if plot_summary:
self.make_all_rf_templates_plots()
# which units to do full analysis
if units_full_analysis is None:
units_full_analysis = np.arange(self.n_units)
else:
units_full_analysis = np.array(units_full_analysis)
# random sample if requested
n_sample = 100
if sample and (len(units_full_analysis) > n_sample):
fname_units = os.path.join(
self.tmp_folder,
'units_full_analysis_individual_plot.npy')
if os.path.exists(fname_units):
units_full_analysis = np.load(fname_units)
else:
units_full_analysis = np.random.choice(
units_full_analysis, n_sample, False)
np.save(fname_units, units_full_analysis)
full_analysis = np.zeros(self.n_units, 'bool')
full_analysis[units_full_analysis] = True
full_analysis = list(full_analysis)
names = []
if plot_all:
units_in = np.arange(self.n_units)
else:
units_in = np.copy(units_full_analysis)
# file names
for unit in units_in:
ptp = str(int(np.round(self.ptps[unit]))).zfill(3)
name = 'ptp_{}_unit_{}'.format(ptp, unit)
names.append(name)
if False:
parmap.map(self.make_individiual_level_plot,
list(units_in),
names,
full_analysis[units_in],
processes=3,
pm_pbar=True)
else:
for ii in tqdm(range(len(units_in))):
self.make_individiual_level_plot(units_in[ii],
names[ii],
full_analysis[units_in[ii]],
divide_by_cell_types
)
def make_individiual_level_plot(self,
unit,
name,
full_analysis=True,
divide_by_cell_types=True
):
# cell type
cell_type = self.cell_types[self.rf_labels[unit]]
if divide_by_cell_types:
# save directory
save_dir = os.path.join(self.save_dir_ind, cell_type)
else:
save_dir = self.save_dir_ind
# template
fname = os.path.join(save_dir, name+'_p0_template.png')
self.make_template_plot(unit, fname)
if full_analysis:
# waveform plots
fname = os.path.join(save_dir, name+'_p1_wfs.png')
self.make_waveforms_plot(unit, fname)
# template neighbors plots
neighbor_units = self.nearest_units[unit]
fname = os.path.join(save_dir, name+'_p2_temp_neigh.png')
title = 'Unit {} ({}), Template Space Neighbors'.format(
unit, cell_type)
self.make_neighbors_plot(unit, neighbor_units, fname, title)
# rf neighbors plots
if np.max(np.abs(self.STAs[unit])) > 1.5*self.rf_std:
neighbor_units = self.nearest_units_rf[unit]
fname = os.path.join(save_dir, name+'_p3_rf_neigh.png')
title = 'Unit {} ({}), RF Space Neighbors'.format(
unit, cell_type)
self.make_neighbors_plot(unit, neighbor_units, fname, title)
# xcorr neighbours
if self.f_rates[unit] > 0.5:
(idx_max, max_vals,
idx_min, min_vals) = self.compute_neighbours_xcorrs(unit)
fname = os.path.join(save_dir, name+'_p4_high_xcorr_neigh.png')
title = 'Unit {} ({}), Xcor Space Neighbors'.format(
unit, cell_type)
self.make_neighbors_plot(unit, idx_max, fname, title)
if np.min(min_vals) < -10:
fname = os.path.join(save_dir, name+'_p5_xcorr_notches.png')
title = 'Unit {} ({}), Xcor Notches'.format(
unit, cell_type)
self.make_neighbors_plot(unit, idx_min, fname, title)
def make_template_plot(self, unit, fname):
if os.path.exists(fname):
return
        # determine channels to include
ptp = self.templates[:, :, unit].ptp(0)
mc = ptp.argmax()
vis_chan = np.where(ptp > 2)[0]
if len(vis_chan) == 0:
vis_chan = [mc]
geom_vis_chan = self.geom[vis_chan]
max_x, max_y = np.max(geom_vis_chan, 0)
min_x, min_y = np.min(geom_vis_chan, 0)
chan_idx = np.logical_and(
np.logical_and(self.geom[:,0] >= min_x-3, self.geom[:,0] <= max_x+3),
np.logical_and(self.geom[:,1] >= min_y-3, self.geom[:,1] <= max_y+3))
chan_idx = np.where(chan_idx)[0]
chan_idx = chan_idx[ptp[chan_idx] > 1]
# also include neighboring channels
neigh_chans = np.where(self.neigh_channels[mc])[0]
chan_idx = np.unique(np.hstack((chan_idx, neigh_chans)))
# plotting parameters
self.fontsize = 40
fig = plt.figure(figsize=[30, 10])
gs = gridspec.GridSpec(1, 1, fig)
# add template summary plot
cell_type = self.cell_types[self.rf_labels[unit]]
fr = str(np.round(self.f_rates[unit], 1))
ptp = str(np.round(self.ptps[unit], 1))
title = "Template of Unit {}, {}Hz, {}SU, Max Channel: {}".format(unit, fr, ptp, mc)
gs = self.add_template_plot(gs, 0, 0,
[unit], [self.colors[0]],
chan_idx, title)
plt.tight_layout()
fig.savefig(fname, bbox_inches='tight', dpi=100)
fig.clf()
plt.close('all')
gs = None
def make_waveforms_plot(self, unit, fname):
if os.path.exists(fname):
return
n_waveforms = 1000
n_examples = 100
fontsize = 20
wf, wf_resid, spt, neigh_chans = self.get_waveforms(unit, n_waveforms)
if wf.shape[0] == 0:
return
template = self.templates[:,:,unit][:, neigh_chans]
spt = spt/self.sampling_rate
spikes_ptp = self.get_spikes_ptp(wf, template)
spikes_ptp_clean = self.get_spikes_ptp(wf_resid, template)
template_ptp = template[:, template.ptp(0).argmax()].ptp()
n_rows = 3
if self.reader_resid is not None:
n_rows += 2
n_cols = len(neigh_chans)
chan_order = np.argsort(template.ptp(0))[::-1]
if n_examples < wf.shape[0]:
idx_plot = np.random.choice(wf.shape[0], n_examples, False)
else:
idx_plot = np.arange(wf.shape[0])
plt.figure(figsize=(n_cols*4, n_rows*4))
# add raw wfs
count = 0
x_range = np.arange(wf.shape[1])/self.sampling_rate*1000
for j, c in enumerate(chan_order):
count += 1
plt.subplot(n_rows, n_cols, count)
plt.plot(x_range, wf[:, :, c][idx_plot].T, color='k', alpha=0.1)
plt.plot(x_range, template[:,c], color='r', linewidth=2)
title = "Channel: {}".format(neigh_chans[c])
if j == 0:
title = 'Raw Waveforms\n' + title
plt.title(title, fontsize=fontsize)
if j == 0:
plt.xlabel('Time (ms)', fontsize=fontsize)
plt.ylabel('Voltage (S.U.)', fontsize=fontsize)
if wf_resid is None:
count = 2
else:
# add clean wfs
for j, c in enumerate(chan_order):
count += 1
plt.subplot(n_rows, n_cols, count)
plt.plot(x_range, wf_resid[:, :, c][idx_plot].T, color='k', alpha=0.1)
plt.plot(x_range, template[:,c], color='r', linewidth=2)
if j == 0:
title = 'Clean Waveforms'
plt.title(title, fontsize=fontsize)
plt.xlabel('Time (ms)', fontsize=fontsize)
plt.ylabel('Voltage (S.U.)', fontsize=fontsize)
# add residual variance
residual_std = np.std(wf_resid, axis=0)
max_std = np.max(residual_std)
min_std = np.min(residual_std)
for j, c in enumerate(chan_order):
count += 1
plt.subplot(n_rows, n_cols, count)
plt.plot(x_range, residual_std[:, c], color='k', linewidth=2)
plt.ylim([0.95*min_std, 1.05*max_std])
if j == 0:
title = 'STD. of Residuals'
plt.title(title, fontsize=fontsize)
plt.xlabel('Time (ms)', fontsize=fontsize)
plt.ylabel('STD', fontsize=fontsize)
count = 4
# ptp vs spike times
plt.subplot(n_rows, 1, count)
plt.scatter(spt, spikes_ptp, c='k')
plt.plot([np.min(spt), np.max(spt)], [template_ptp, template_ptp], 'r')
plt.title('ptp vs spike times (red = template ptp)\n Raw Waveforms',
fontsize=fontsize)
plt.xlabel('Time (seconds)', fontsize=fontsize)
plt.ylabel('PTP (S.U.)', fontsize=fontsize)
# ptp vs spike times (clean waveforms)
plt.subplot(n_rows, 1, count+1)
plt.scatter(spt, spikes_ptp_clean, c='k')
plt.plot([np.min(spt), np.max(spt)], [template_ptp, template_ptp], 'r')
plt.title('Clean Waveforms', fontsize=fontsize)
plt.xlabel('Time (seconds)', fontsize=fontsize)
plt.ylabel('PTP (S.U.)', fontsize=fontsize)
# suptitle
fr = np.round(float(self.f_rates[unit]), 1)
ptp = np.round(float(self.ptps[unit]), 1)
suptitle = 'Unit: {}, {}Hz, {}SU'.format(unit, fr, ptp)
suptitle = suptitle + ', ' + self.cell_types[self.rf_labels[unit]]
plt.suptitle(suptitle,
fontsize=int(1.5*fontsize))
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(fname, bbox_inches='tight')
plt.clf()
plt.close('all')
def make_neighbors_plot(self, unit, neighbor_units, fname, title):
if os.path.exists(fname):
return
mc = self.templates[:, :, unit].ptp(0).argmax()
chan_idx = np.where(self.neigh_channels[mc])[0]
zoom_windows = zoom_in_window(self.STAs, unit, self.rf_std*1.5)
if zoom_windows is not None:
col_minus = 0
else:
col_minus = 1
# plotting parameters
self.fontsize = 30
self.figsize = [int(6*(8-col_minus)), 25]
fig = plt.figure(figsize=self.figsize)
fig.suptitle(title, fontsize=2*self.fontsize)
gs = gridspec.GridSpec(self.n_neighbours+2, 8-col_minus, fig,
left=0, right=1, top=0.92, bottom=0.05,
hspace=0.2, wspace=0.1)
start_row = 0
# add template summary plot
fr = str(np.round(self.f_rates[unit], 1))
ptp = str(np.round(self.ptps[unit], 1))
title = "Unit: {}, {}Hz, {}SU".format(unit, fr, ptp)
gs = self.add_template_summary(
gs, start_row, slice(2), unit, title)
# add template
title = 'Zoomed-in Templates'
gs = self.add_template_plot(gs, start_row, 2,
[unit], [self.colors[0]],
chan_idx, title)
# add rf
title = 'Spatial RF'
gs = self.add_RF_plot(gs, start_row, 3, unit, None, title)
if zoom_windows is not None:
title = 'Zoomed-in Spatial RF'
gs = self.add_RF_plot(gs, start_row, 4,
unit, zoom_windows, title)
# add temporal sta
title = 'Temporal RF'
gs = self.add_temporal_sta(gs, start_row, 5-col_minus,
unit, title)
# add autocorrelogram
title = 'Autocorrelogram'
gs = self.add_xcorr_plot(gs, start_row, 6-col_minus, unit, unit, title)
start_row += 1
## Neighbor Units by templates ##
for ctr, neigh in enumerate(neighbor_units):
fr = str(np.round(self.f_rates[neigh], 1))
ptp = str(np.round(self.ptps[neigh], 1))
title = "Unit: {}, {}Hz, {}SU".format(neigh, fr, ptp)
gs = self.add_template_summary(
gs, ctr+start_row, slice(2), neigh, title)
gs = self.add_template_plot(
gs, ctr+start_row, 2,
np.hstack((unit, neigh)),
[self.colors[c] for c in [0,ctr+1]],
chan_idx
)
gs = self.add_RF_plot(gs, ctr+start_row, 3, neigh)
if zoom_windows is not None:
gs = self.add_RF_plot(gs, ctr+start_row, 4, neigh, zoom_windows)
if ctr == len(neighbor_units)-1:
add_label = True
else:
add_label = False
gs = self.add_temporal_sta(
gs, ctr+start_row, 5-col_minus, neigh, None, add_label)
if ctr == 0:
title = 'Cross-correlogram'
else:
title = None
gs = self.add_xcorr_plot(gs, ctr+start_row, 6-col_minus,
unit, neigh, title, add_label)
if self.reader_resid is not None:
if ctr == 0:
title = 'Histogram of\nLDA Projection of\nSpikes-to-Templates\nDistance'
else:
title = None
if ctr == len(neighbor_units)-1:
add_label = True
else:
add_label = False
gs = self.add_l2_feature_plot(gs, ctr+start_row, 7-col_minus, unit, neigh,
[self.colors[c] for c in [0,ctr+1]],
title, add_label)
# add contour plots
title = 'Contours of Spatial RF'
gs = self.add_contour_plot(
gs, self.n_neighbours+start_row, 3,
np.hstack((unit, neighbor_units)),
self.colors[:self.n_neighbours+1], None, title)
if zoom_windows is not None:
title = 'zoomed-in Contours'
gs = self.add_contour_plot(
gs, self.n_neighbours+start_row, 4,
np.hstack((unit, neighbor_units)),
self.colors[:self.n_neighbours+1],
True, title)
#plt.tight_layout(rect=[0, 0.03, 1, 0.93])
fig.savefig(fname, bbox_inches='tight', dpi=100)
fig.clf()
plt.close('all')
gs = None
def pairwise_plot(self, pairs):
# saving directory location
save_dir_ind = os.path.join(self.save_dir,'pairs')
if not os.path.exists(save_dir_ind):
os.makedirs(save_dir_ind)
max_pairs = 20
count = -1
n_pages = 1
# plotting parameters
self.fontsize = 20
self.figsize = [60, 100]
fig=plt.figure(figsize=self.figsize)
gs = gridspec.GridSpec(max_pairs, 6, fig)
checked = np.zeros((self.n_units, self.n_units), 'bool')
for ii in tqdm(range(len(pairs))):
unit1 = pairs[ii][0]
unit2 = pairs[ii][1]
if not checked[unit1, unit2]:
count += 1
checked[unit1, unit2] = 1
checked[unit2, unit1] = 1
gs = self.add_template_plot(gs, count, slice(0,2),
np.hstack((unit1, unit2)),
self.colors[:2])
gs = self.add_RF_plot(gs, count, 2, unit1)
gs = self.add_RF_plot(gs, count, 3, unit2)
gs = self.add_xcorr_plot(gs, count, 4, unit1, unit2)
if self.fname_residual is not None:
gs = self.add_l2_feature_plot(gs, count, 5, unit1, unit2, self.colors[:2])
if count == max_pairs or ii == (len(pairs)-1):
fname = os.path.join(save_dir_ind, 'page_{}.png'.format(n_pages))
fig.savefig(fname, bbox_inches='tight', dpi=100)
plt.close()
if ii < len(pairs)-1:
fig=plt.figure(figsize=self.figsize)
gs = gridspec.GridSpec(max_pairs, 6, fig)
count = 0
n_pages += 1
fname = os.path.join(save_dir_ind, 'page_{}.png'.format(n_pages))
fig.savefig(fname, bbox_inches='tight', dpi=100)
fig.clf()
fig.cla()
plt.close('all')
def get_waveforms(self, unit, n_examples=200):
idx = np.where(self.spike_train[:,1]==unit)[0]
spt = self.spike_train[idx, 0]
prob = self.soft_assignment[idx]
if np.sum(prob) < 1:
return np.zeros((0,1,1)), None, None, None
spt = np.random.choice(spt,
np.min((n_examples, int(np.sum(prob)))),
False, prob/np.sum(prob))
mc = self.templates[:, :, unit].ptp(0).argmax()
neigh_chans = np.where(self.neigh_channels[mc])[0]
temp = self.templates[:, :, unit][:, mc]
wf, skipped_idx = self.reader.read_waveforms(spt, self.n_times_templates, neigh_chans)
mc_neigh = np.where(neigh_chans == mc)[0][0]
shifts = align_get_shifts_with_ref(
wf[:, :, mc_neigh], temp)
wf = shift_chans(wf, shifts)
spt = np.delete(spt, skipped_idx)
if self.reader_resid is not None:
wf_resid, _ = self.reader_resid.read_waveforms(spt, self.n_times_templates, neigh_chans)
wf_resid = wf_resid + self.templates[:, :, unit][:, neigh_chans][None]
shifts = align_get_shifts_with_ref(
wf_resid[:, :, mc_neigh], temp)
wf_resid = shift_chans(wf_resid, shifts)
else:
wf_resid = None
return wf, wf_resid, spt, neigh_chans
def get_clean_waveforms(self, unit, n_examples=200, spt=None):
if spt is None:
idx = np.where(self.spike_train[:,1]==unit)[0]
idx = np.random.choice(idx,
np.min((n_examples, len(idx))),
False)
spt = self.spike_train[idx,0] - self.templates.shape[0]//2
mc = self.templates[:, :, unit].ptp(0).argmax()
neigh_chans = np.where(self.neigh_channels[mc])[0]
wf_res, idx_skipped = binary_reader_waveforms(
self.fname_residual,
self.n_channels,
self.templates.shape[0],
spt, neigh_chans)
spt = np.delete(spt, idx_skipped)
wf = wf_res + self.templates[:, :, unit][:, neigh_chans]
mc_neigh = np.where(neigh_chans == mc)[0][0]
shifts = align_get_shifts_with_ref(
wf[:, :, mc_neigh])
wf = shift_chans(wf, shifts)
return wf, spt
def add_example_waveforms(self, gs, x_loc, unit):
        wf, _, spt, _ = self.get_waveforms(unit)
if wf.shape[0] > 0:
mc = self.templates[:, :, unit].ptp(0).argmax()
neigh_chans = np.where(self.neigh_channels[mc])[0]
order_neigh_chans = np.argsort(
np.linalg.norm(self.geom[neigh_chans] - self.geom[[mc]], axis=1))
for ii, j in enumerate(order_neigh_chans):
chan = neigh_chans[j]
ax = plt.subplot(gs[x_loc, ii])
ax.plot(wf[:, :, j].T, color='k', alpha=0.1)
ax.plot(self.templates[:, chan, unit].T, color='r', linewidth=2)
title = "Channel: {}".format(chan)
if ii == 0:
title = 'Raw Waveforms, Unit: {}, '.format(unit) + title
ax.set_title(title, fontsize=self.fontsize)
return gs, wf, spt
def add_denoised_waveforms(self, gs, x_loc, unit, wf=None):
if wf is None:
            wf, _, spt, _ = self.get_waveforms(unit)
if wf.shape[0] > 0:
n_data, n_times, n_chans = wf.shape
denoised_wf = np.zeros((n_data, n_times, n_chans))
n_times_deno = 61
n_times_diff = (n_times - n_times_deno)//2
wf = wf[:, n_times_diff:-n_times_diff]
wf_reshaped = wf.transpose(0, 2, 1).reshape(-1, n_times_deno)
wf_torch = torch.FloatTensor(wf_reshaped).cuda()
denoised_wf_short = self.denoiser(wf_torch)[0].reshape(
n_data, n_chans, n_times_deno)
denoised_wf_short = denoised_wf_short.cpu().data.numpy().transpose(0, 2, 1)
denoised_wf[:, n_times_diff:-n_times_diff] = denoised_wf_short
mc = self.templates[:, :, unit].ptp(0).argmax()
neigh_chans = np.where(self.neigh_channels[mc])[0]
order_neigh_chans = np.argsort(
np.linalg.norm(self.geom[neigh_chans] - self.geom[[mc]], axis=1))
for ii, j in enumerate(order_neigh_chans):
chan = neigh_chans[j]
ax = plt.subplot(gs[x_loc, ii])
ax.plot(denoised_wf[:, :, j].T, color='k', alpha=0.1)
ax.plot(self.templates[:, chan, unit].T, color='r', linewidth=2)
if ii == 0:
title = 'Denoised Waveforms'
ax.set_title(title, fontsize=self.fontsize)
return gs
def add_residual_template(self, gs, x_loc, unit, spt=None):
mc = self.templates[:, :, unit].ptp(0).argmax()
neigh_chans = np.where(self.neigh_channels[mc])[0]
wf, spt = self.get_clean_waveforms(unit, spt=spt)
if wf.shape[0] > 0:
order_neigh_chans = np.argsort(
np.linalg.norm(self.geom[neigh_chans] - self.geom[[mc]], axis=1))
for ii, j in enumerate(order_neigh_chans):
chan = neigh_chans[j]
ax = plt.subplot(gs[x_loc, ii])
ax.plot(wf[:, :, j].T, color='k', alpha=0.1)
ax.plot(self.templates[:, chan, unit].T, color='r', linewidth=2)
if ii == 0:
title = 'Residual + Template'
ax.set_title(title, fontsize=self.fontsize)
return gs
def determine_channels_in(self, unit, n_max_chans = 30):
temp = self.templates[:, :, unit]
ptp = temp.ptp(0)
mc = ptp.argmax()
dist_to_mc = np.linalg.norm(
self.geom - self.geom[mc], axis=1)
idx_tmp = np.argsort(dist_to_mc)[:n_max_chans]
max_dist = dist_to_mc[idx_tmp].max()
chans_plot = np.where(dist_to_mc <= max_dist)[0]
return chans_plot
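        # NOTE: everything below this return is unreachable (apparently an
        # earlier channel-selection heuristic left in place).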
chans_plot = []
n_vis_chans = 1
while len(chans_plot) < n_max_chans:
n_vis_chans += 1
idx_chan = np.argsort(ptp)[::-1][:n_vis_chans]
center = np.mean(self.geom[idx_chan], axis=0)
max_dist = np.linalg.norm(self.geom[idx_chan] - center, axis=1).max()
chans_plot = np.where(np.linalg.norm(self.geom - center, axis=1) <= max_dist)[0]
n_vis_chans -= 1
idx_chan = np.argsort(ptp)[::-1][:n_vis_chans]
center = np.mean(self.geom[idx_chan], axis=0)
max_dist = np.linalg.norm(self.geom[idx_chan] - center, axis=1).max()
chans_plot = np.where(np.linalg.norm(self.geom - center, axis=1) <= max_dist)[0]
return chans_plot
def add_template_plot(self, gs, x_loc, y_loc, units, colors, chan_idx=None, title=None):
if chan_idx is None:
chan_idx = np.arange(self.n_channels)
# plotting parameters
time_scale=1.8
max_ptp = np.max(self.templates[:, :, units][:, chan_idx].ptp(0))
scale= 100/max_ptp
alpha=0.4
R = self.templates.shape[0]
ax = plt.subplot(gs[x_loc, y_loc])
for ii, unit in enumerate(units):
ax.plot(self.geom[chan_idx, 0]+np.arange(-R, 0)[:,np.newaxis]/time_scale,
self.geom[chan_idx, 1] + self.templates[:, :, unit][:, chan_idx]*scale,
color=colors[ii], linewidth=2)
# add channel number
#for k in chan_idx:
# ax.text(self.geom[k,0]+1, self.geom[k,1], str(k), fontsize=self.fontsize)
# add +-1 su grey shade
x = np.arange(-R,0)/time_scale
y = -np.ones(x.shape)*scale
for k in chan_idx:
ax.fill_between(x+self.geom[k,0],
y+self.geom[k,1],
y+2*scale+ self.geom[k,1], color='grey', alpha=0.1)
plt.yticks([])
plt.xticks([])
if title is not None:
ax.set_title(title, fontsize=self.fontsize)
return gs
def add_template_summary(self, gs, x_loc, y_loc, unit, title=None, add_color_bar=True, scale=50):
temp = self.templates[:,:,unit]
ptp = temp.ptp(0)
min_point = self.shifts[unit]
vis_chan = ptp > 1
ptp = ptp[vis_chan]
min_point = min_point[vis_chan]
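        # scatter over the probe geometry: marker size encodes the template PTP
        # on each visible channel, color encodes the per-channel shift
        # (self.shifts) mapped onto [min_shift, max_shift]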
ax = plt.subplot(gs[x_loc, y_loc])
plt.scatter(self.geom[vis_chan, 0],
self.geom[vis_chan, 1],
s=ptp*scale, c=min_point,
vmin=self.min_shift,
vmax=self.max_shift,
cmap=self.cmap
)
np.max(self.geom[:,0]) + 30
plt.xlim([np.min(self.geom[:,0]) - 30, np.max(self.geom[:,0]) + 30])
plt.ylim([np.min(self.geom[:,1]) - 30, np.max(self.geom[:,1]) + 30])
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.tick_params(axis='both', which='both', length=0)
#if add_color_bar:
if False:
cbar = plt.colorbar(pad=0.01, fraction=0.05)
#ticks = cbar.get_ticks()
ticks = np.arange(self.min_shift, self.max_shift+1, 20)
ticklabels = np.round((
ticks/self.sampling_rate*1000).astype('float32'), 1)
cbar.set_ticks(ticks)
cbar.set_ticklabels(ticklabels)
cbar.ax.tick_params(labelsize=self.fontsize)
if title is not None:
ax.set_title(title, fontsize=self.fontsize)
# add channel number
#for k in np.arange(self.n_channels):
# plt.text(self.geom[k,0]+1, self.geom[k,1], str(k), fontsize=self.fontsize//3)
return gs
def add_RF_plot(self, gs, x_loc, y_loc, unit, windows=None, title=None):
# COMPUTE
#neighbor_units = self.nearest_units[unit]
ax = plt.subplot(gs[x_loc, y_loc])
img = self.STAs[unit].T #*self.sta_sign[unit]
vmax = np.max(np.abs(img))
vmin = -vmax
ax.imshow(img, vmin=vmin, vmax=vmax)
if windows is not None:
ax.set_xlim([windows[0][0], windows[0][1]])
ax.set_ylim([windows[1][0], windows[1][1]])
else:
ax.set_xlim([0,self.stim_size[0]])
ax.set_ylim([0,self.stim_size[1]])
if title is not None:
ax.set_title(title, fontsize=self.fontsize)
ax.set_axis_off()
# also plot all in one plot
#ax = plt.subplot(gs[self.n_neighbours+1, ax_col])
#ax = self.plot_contours(ax, np.hstack((unit,neighbor_units)),
# self.colors[:self.n_neighbours+1])
return gs
def add_contour_plot(self, gs, x_loc, y_loc, units, colors, zoom_in=False, title=None, windows=None, legend=True):
ax = plt.subplot(gs[x_loc, y_loc])
labels = []
x_min = None
for ii, unit in enumerate(units):
# also plot all in one plot
if np.any(self.gaussian_fits[unit] != 0):
plotting_data = self.get_circle_plotting_data(
unit, self.gaussian_fits)
ax.plot(plotting_data[1],plotting_data[0],
color=colors[ii], linewidth=3)
x_min_, x_max_ = np.min(plotting_data[1]), np.max(plotting_data[1])
y_min_, y_max_ = np.min(plotting_data[0]), np.max(plotting_data[0])
if x_min is None:
x_min, x_max = x_min_, x_max_
y_min, y_max = y_min_, y_max_
else:
x_min = np.min((x_min_, x_min))
x_max = np.max((x_max_, x_max))
y_min = np.min((y_min_, y_min))
y_max = np.max((y_max_, y_max))
labels.append(mpatches.Patch(color = colors[ii], label = "Unit {}".format(unit)))
if legend:
ax.legend(handles=labels)
if zoom_in and (windows is None):
ax.set_xlim([x_min-1, x_max+1])
ax.set_ylim([y_min-1, y_max+1])
elif zoom_in and (windows is not None):
ax.set_xlim([windows[0][0], windows[0][1]])
ax.set_ylim([windows[1][0], windows[1][1]])
else:
ax.set_xlim([0,self.stim_size[0]])
ax.set_ylim([0,self.stim_size[1]])
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.tick_params(axis='both', which='both', length=0)
if title is not None:
ax.set_title(title, fontsize=self.fontsize)
return gs
def add_temporal_sta(self, gs, x_loc, y_loc, unit, title=None, add_label=False):
ax = plt.subplot(gs[x_loc, y_loc])
lw = 3
sta = self.STAs_temporal[unit]
ax.plot(sta[:,0], 'r', linewidth=lw)
ax.plot(sta[:,1], 'g', linewidth=lw)
ax.plot(sta[:,2], 'b', linewidth=lw)
plt.yticks([])
#plt.xticks([])
if title is not None:
ax.set_title(title, fontsize=self.fontsize)
if add_label:
ax.set_xlabel('time (frames)', fontsize=self.fontsize)
return gs
    def add_ptp_vs_time(self, ptp, spt, template):
        plt.scatter(spt, ptp/self.sampling_rate, c='k')
        plt.plot([np.min(spt), np.max(spt)], [template.ptp(), template.ptp()], 'r')
        #plt.eventplot(spt, color='k', linewidths=0.01)
        plt.title('ptp vs spike times (red = template ptp)',
                 fontsize=self.fontsize)
def get_spikes_ptp(self, wf, template):
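        # measure each spike's PTP on the template's main channel, restricted to
        # a small window around the template's trough and peak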
mc = template.ptp(0).argmax()
temp = template[:, mc]
min_point = temp.argmin()
max_point = temp.argmax()
first_point = np.min((min_point, max_point))
second_point = np.max((min_point, max_point))
window = np.arange(np.max((first_point-2, 0)),
np.min((second_point+3, len(temp))))
ptp_spikes = wf[:, :, mc][:, window].ptp(1)
return ptp_spikes
def get_circle_plotting_data(self,i_cell,Gaussian_params):
        # Adapted from Nora's matlab code, hasn't been triple-checked
circle_samples = np.arange(0,2*np.pi,0.05)
x_circle = np.cos(circle_samples)
y_circle = np.sin(circle_samples)
# Get Gaussian parameters
angle = -Gaussian_params[i_cell,5]
sd = Gaussian_params[i_cell,3:5]
x_shift = self.stim_size[1] - Gaussian_params[i_cell,1]
y_shift = Gaussian_params[i_cell,2]
R = np.asarray([[np.cos(angle), np.sin(angle)],[-np.sin(angle), np.cos(angle)]])
L = np.asarray([[sd[0], 0],[0, sd[1]]])
circ = np.concatenate([x_circle.reshape((-1,1)),y_circle.reshape((-1,1))],axis=1)
X = np.dot(R,np.dot(L,np.transpose(circ)))
X[0] = X[0]+x_shift
X[1] = np.abs(X[1]+y_shift)
plotting_data = X
return plotting_data
def add_xcorr_plot(self, gs, x_loc, y_loc, unit1, unit2, title=None, add_label=False):
# COMPUTE XCORRS w. neighbouring units;
if (unit1 in self.unique_ids) and (unit2 in self.unique_ids):
unit1_ = np.where(self.unique_ids == unit1)[0][0]
unit2_ = np.where(self.unique_ids == unit2)[0][0]
result = self.xcorrs[unit1_, unit2_]
else:
result = np.zeros(self.xcorrs.shape[2])
window_size_ms = self.window_size*1000
bin_width_ms = self.bin_width*1000
x_range = np.arange(-(window_size_ms//2),
window_size_ms//2+1, bin_width_ms)
#notch, pval1 = notch_finder(result)
#pval1 = np.round(pval1, 2)
ax = plt.subplot(gs[x_loc, y_loc])
plt.plot(x_range, result,color='black', linewidth=2)
#y_max = np.max((10, 1.5*np.max(result)))
y_max = 1.5*np.max(result)
plt.ylim(0, y_max)
plt.plot([0,0],[0, y_max],'r--')
if add_label:
plt.xlabel('time (ms)', fontsize=self.fontsize)
#plt.ylabel('rates (Hz)', fontsize=self.fontsize)
#plt.ylabel('counts', fontsize=self.fontsize)
plt.tick_params(axis='both', which='major', labelsize=self.fontsize)
if title is not None:
plt.title(title, fontsize=self.fontsize)
return gs
def add_l2_feature_plot(self, gs, x_loc, y_loc, unit1, unit2, colors, title=None, add_label=False):
#n_samples = 5000
l2_features, spike_ids = get_l2_features(
self.reader_resid, self.spike_train,
self.templates.transpose(2,0,1),
self.soft_assignment,
unit1, unit2)
if l2_features is None:
return gs
lda = LDA(n_components = 1)
feat = lda.fit_transform(l2_features, spike_ids).ravel()
#try:
#(merge,
# lda_prob,
# dp_val) = test_merge(l2_features, spike_ids)
#l2_1d_features = np.diff(l2_features, axis=0)[0]
n_bins = np.max((int(len(feat)/20), 1))
steps = (np.max(feat) - np.min(feat))/n_bins
bins = np.arange(np.min(feat), np.max(feat)+steps, steps)
ax = plt.subplot(gs[x_loc, y_loc])
plt.hist(feat, bins, color='slategrey')
plt.hist(feat[spike_ids==0], bins, color=colors[0], alpha=0.7)
plt.hist(feat[spike_ids==1], bins, color=colors[1], alpha=0.7)
#ax.tick_params(labelsize=self.fontsize)
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.tick_params(axis='both', which='both', length=0)
if add_label:
plt.xlabel('LDA Projection', fontsize=self.fontsize)
#plt.title(
# 'Dip Test: {}'.format(np.round(dp_val,4)),
# fontsize=self.fontsize)
#except:
# print ("Diptest error for unit {} and {} with size {}".format(
# unit1, unit2, l2_features.shape[0]))
if title is not None:
plt.title(title, fontsize=self.fontsize)
return gs
def add_full_sta(self, gs, x_locs, y_locs, unit):
fname = os.path.join(self.rf_dir, 'tmp', 'sta', 'unit_{}.mat'.format(unit))
full_sta = loadmat(fname)['temp_stas'].transpose(1,0,2,3)[:,:,:,-len(x_locs):]
vmax = np.max(full_sta)
vmin = np.min(full_sta)
for ii, (x_loc, y_loc) in enumerate(zip(x_locs, y_locs)):
ax = plt.subplot(gs[x_loc, y_loc])
img = full_sta[:,:,:,ii]
ax.imshow(img, vmin=vmin, vmax=vmax)
ax.set_xlim([0, 64])
ax.set_ylim([32,0])
ax.set_title('time {}'.format(ii), fontsize=self.fontsize)
return gs
def make_normalized_templates_plot(self):
fname = os.path.join(self.save_dir, 'normalized_templates.png')
if os.path.exists(fname):
return
(templates_mc, templates_sec,
ptp_mc, ptp_sec, _) = get_normalized_templates(
self.templates.transpose(2, 0, 1),
self.neigh_channels)
plt.figure(figsize=(14, 8))
x_range = np.arange(templates_mc.shape[1])/self.sampling_rate*1000
ths = [2, 4, 6]
for ii, th in enumerate(ths):
plt.subplot(2, 3, ii+1)
if ii == 0:
idx = np.logical_and(ptp_mc >= th, ptp_mc < ths[ii+1])
plt.title("Templates on Main Channel\n Templates with {} < PTP < {}".format(
th, ths[ii+1]), fontsize=self.fontsize//2)
elif ii == 1:
idx = np.logical_and(ptp_mc >= th, ptp_mc < ths[ii+1])
plt.title("Templates with {} < PTP < {}".format(th, ths[ii+1]), fontsize=self.fontsize//2)
else:
idx = ptp_mc >= th
plt.title("Templates with {} < PTP".format(th), fontsize=self.fontsize//2)
if sum(idx) > 0:
plt.plot(x_range, templates_mc[idx].T,
color='k', alpha=0.1)
plt.xlabel('time (ms)')
plt.xlim([0, np.max(x_range)])
if ii == 0:
plt.ylabel('Normalized Voltage (A.U.)',
fontsize=self.fontsize//2)
#if ii < 2:
# plt.xlabel('Main Chan., {} < PTP < {}'.format(th, ths[ii+1]), fontsize=self.fontsize//2)
#else:
# plt.xlabel('Main Chan., {} < PTP'.format(th), fontsize=self.fontsize//2)
for ii, th in enumerate(ths):
plt.subplot(2, 3, ii+4)
if ii == 0:
plt.title("Templates on Secondary Channels", fontsize=self.fontsize//2)
if ii < 2:
idx = np.logical_and(ptp_sec >= th, ptp_sec < ths[ii+1])
#plt.title("Templates with {} < PTP < {}".format(th, ths[ii+1]), fontsize=self.fontsize//2)
else:
idx = ptp_sec >= th
#plt.title("Templates with {} < PTP".format(th), fontsize=self.fontsize//2)
if sum(idx) > 0:
plt.plot(x_range, templates_sec[idx].T, color='k', alpha=0.02)
plt.xlabel('time (ms)')
plt.xlim([0, np.max(x_range)])
if ii == 0:
plt.ylabel('Normalized Voltage (A.U.)',
fontsize=self.fontsize//2)
#if ii < 2:
# plt.xlabel('Sec. Chan., {} < PTP < {}'.format(th, ths[ii+1]), fontsize=self.fontsize//2)
#else:
# plt.xlabel('Sec. Chan., {} < PTP'.format(th), fontsize=self.fontsize//2)
plt.suptitle('Aligned Templates on Their Main/Secondary Channels', fontsize=20)
plt.tight_layout(rect=[0, 0.01, 1, 0.95])
plt.savefig(fname, dpi=100)
plt.close()
def make_raster_plot(self):
fname = os.path.join(self.save_dir, 'raster.png')
if os.path.exists(fname):
return
plt.figure(figsize=(30,15))
ptps = self.ptps
order = np.argsort(ptps)
sorted_ptps = np.round(np.sort(ptps),2)
for j in range(self.n_units):
k = order[j]
idx = self.spike_train[:,1] == k
spt = self.spike_train[idx, 0]/self.sampling_rate
prob = self.soft_assignment[idx]
if np.sum(prob) > 1:
spt = np.sort(np.random.choice(
spt, int(np.sum(prob)), False, prob/np.sum(prob)))
plt.eventplot(spt, lineoffsets=j, color='k', linewidths=0.01)
plt.yticks(np.arange(0,self.n_units,10), sorted_ptps[0:self.n_units:10])
plt.ylabel('ptps', fontsize=self.fontsize)
plt.xlabel('time (seconds)', fontsize=self.fontsize)
plt.title('Raster Plot Sorted by PTP', fontsize=self.fontsize)
plt.savefig(fname, bbox_inches='tight', dpi=100)
plt.close()
def make_firing_rate_plot(self):
fname = os.path.join(self.save_dir, 'firing_rates.png')
if os.path.exists(fname):
return
unique_labels = np.unique(self.rf_labels)
fontsize = 15
n_figs = len(self.cell_types)
n_cols = 3
n_rows = int(np.ceil(n_figs/n_cols))
max_fr = np.max(self.f_rates)
plt.figure(figsize=(5*n_cols, n_rows*3))
x_max = int(np.max(np.log(self.ptps))) + 1
x_ticks = np.round(np.exp(np.arange(0, x_max+1)), 1)
y_max = int(np.max(np.log(self.f_rates))) + 1
y_ticks = np.round(np.exp(np.arange(0, y_max+1)), 1)
for ii, label in enumerate(unique_labels):
plt.subplot(n_rows, n_cols, ii+1)
idx_ = self.rf_labels == label
plt.scatter(np.log(self.ptps[idx_]),
np.log(self.f_rates[idx_]),
color=self.colors[label],
alpha=0.5)
plt.xticks(np.arange(0, x_max+1), x_ticks)
plt.yticks(np.arange(0, y_max+1), y_ticks)
plt.xlim([-0.5, x_max+0.5])
plt.ylim([-0.5, y_max+0.5])
plt.title(self.cell_types[label], fontsize=fontsize)
if ii == 0:
plt.ylabel('firing rates', fontsize=fontsize)
if ii == (n_rows-1)*n_cols:
plt.xlabel('ptps (log scaled)', fontsize=fontsize)
if ii % n_cols != 0:
plt.yticks([])
plt.subplots_adjust(top = 0.85, wspace = 0.001, hspace=0.3)
plt.suptitle('Firing Rate vs. PTP', fontsize=2*fontsize)
plt.savefig(fname, bbox_inches='tight', dpi=100)
plt.close()
def add_residual_qq_plot(self):
fname = os.path.join(self.save_dir, 'residual_qq_plot.png')
if os.path.exists(fname):
return
nrow = int(np.sqrt(self.n_channels))
ncol = int(np.ceil(self.n_channels/nrow))
plt.figure(figsize=(int(ncol*2.5), nrow*2))
sample_size = 10000
t_start = int(np.random.choice(
self.reader_resid.rec_len-sample_size-1, 1)[0])
res = self.reader_resid.read_data(
t_start, t_start+sample_size)
th = np.sort(np.random.normal(size = sample_size))
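        # per-channel QQ plot: sorted residual samples vs. sorted standard-normal
        # draws; points on the red identity line indicate Gaussian residuals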
for c in range(self.n_channels):
qq = np.sort(np.random.choice(
res[:, c], sample_size, False))
plt.subplot(nrow, ncol, c+1)
plt.subplots_adjust(top = 0.95, wspace = 0.001)
plt.scatter(th, qq, s=5)
min_val = np.min((th.min(), qq.min()))
max_val = np.max((th.max(), qq.max()))
plt.plot([min_val, max_val], [min_val, max_val], color='r')
plt.title('Channel: {}'.format(c))
plt.xticks([])
plt.yticks([])
plt.suptitle(
'QQ plot of Residual Recording: Sample from {} to {} Timepoints'.format(
t_start, t_start+sample_size), fontsize=int(3*ncol))
#plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(fname, bbox_inches='tight', dpi=100)
plt.close()
def residual_varaince(self):
fname = os.path.join(self.save_dir, 'residual_variance.png')
if os.path.exists(fname):
return
# calculate variance per unit
n_examples = 1000
units, counts = np.unique(self.spike_train[:, 1], return_counts=True)
units = units[counts > 10]
resid_var = np.zeros((len(units), self.n_times_templates))
for ii, unit in enumerate(units):
idx = np.where(self.spike_train[:,1]==unit)[0]
idx = np.random.choice(idx,
np.min((n_examples, len(idx))),
False)
spt = self.spike_train[idx, 0]
mc = self.templates[:, :, unit].ptp(0).argmax()
resid_var[ii] = np.var(self.reader_resid.read_waveforms(spt, None, [mc])[0][:,:,0], 0)
# values for plotting
ptps = self.ptps[units]
max_var = resid_var.max(1)
rf_labels = self.rf_labels[units]
unique_labels = np.unique(rf_labels)
plt.figure(figsize=(15,5))
plt.subplot(1,2,1)
plt.plot(np.arange(resid_var.shape[1])/self.sampling_rate*1000, resid_var.T, 'k', alpha=0.2)
plt.ylim([0,5])
plt.xlabel('time (ms)', fontsize=self.fontsize)
plt.ylabel('maximum variance', fontsize=self.fontsize)
plt.title('Time vs Residual Variance of all units')
plt.subplot(1,2,2)
legends = []
for ii, label in enumerate(unique_labels):
idx_ = rf_labels == label
plt.scatter(np.log(ptps[idx_]), max_var[idx_], color=self.colors[label], alpha=0.5)
legends.append(mpatches.Patch(color = self.colors[label], label = self.cell_types[ii]))
plt.legend(handles=legends, loc='center left', bbox_to_anchor=(1, 0.5), fontsize=self.fontsize)
x_max = int(np.max(np.log(ptps))) + 1
x_ticks = np.round(np.exp(np.arange(0, x_max+1)), 1)
plt.xticks(np.arange(0, x_max+1), x_ticks)
plt.xlim([0, x_max])
plt.xlabel('ptp (log scaled)', fontsize=self.fontsize)
plt.ylabel('Maximum Variance', fontsize=self.fontsize)
plt.title('PTP vs Maximum Residual Variance')
plt.savefig(fname, bbox_inches='tight', dpi=100)
plt.close()
def add_raw_resid_snippets(self):
save_dir = os.path.join(self.save_dir, 'raw_residual_snippets/')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
n_batches = np.minimum(self.reader.n_batches, 10)
n_big = 5
n_random = 5
t_window = 50
batch_ids = np.random.choice(self.reader.n_batches, n_batches)
for batch_id in batch_ids:
fname = os.path.join(save_dir, 'large_residual_chunk_{}.png'.format(batch_id))
if os.path.exists(fname):
continue
offset = self.reader.idx_list[batch_id][0]
raw = self.reader.read_data_batch(batch_id)
res = self.reader_resid.read_data_batch(batch_id)
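            # find times where the residual is still large: local maxima of the
            # per-timepoint maximum amplitude above 4 SU, then plot the n_big
            # largest plus n_random random ones against the raw recording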
max_res = np.abs(np.max(res, 1))
max_ids = scipy.signal.argrelmax(max_res, order=2*t_window+1)[0]
max_ids = max_ids[np.logical_and(max_ids > t_window, max_ids < res.shape[0]-t_window)]
max_res = max_res[max_ids]
id_keep = max_res > 4
max_ids = max_ids[id_keep]
max_res = max_res[id_keep]
id_big = max_ids[np.argsort(max_res)[-n_big:]]
id_random = np.random.choice(max_ids, n_random)
id_big_c = res[id_big].argmax(1)
id_random_c = res[id_random].argmax(1)
id_check = np.hstack((id_big, id_random))
id_check_c = np.hstack((id_big_c, id_random_c))
x_range = np.arange(t_window*2+1)/self.sampling_rate*1000
plt.figure(figsize=(30, 10))
for j in range(n_big+n_random):
chans = np.where(self.neigh_channels[id_check_c[j]])[0]
tt = id_check[j]
snip_raw = raw[:, chans][tt-t_window:tt+t_window+1]
snip_res = res[:, chans][tt-t_window:tt+t_window+1]
spread = np.arange(len(chans))*10
plt.subplot(2, n_big, j+1)
plt.plot(x_range, snip_raw+spread[None], 'k')
plt.plot(x_range, snip_res+spread[None], 'r')
plt.xlabel('Time (ms)')
plt.yticks([])
y = -np.ones(t_window*2+1)
for s in spread:
plt.fill_between(x_range,
y + s,
y + s + 2,
color='grey',
alpha=0.1)
plt.title('Recording Time {}, Channel {}'.format(tt + offset, id_check_c[j]))
if j == 0:
legends = [mpatches.Patch(color = 'k', label = 'raw'),
mpatches.Patch(color = 'r', label = 'residual')
]
plt.legend(handles=legends)
plt.tight_layout()
plt.savefig(fname, bbox_inches='tight', dpi=100)
plt.close('all')
def cell_classification_plots(self):
fname = os.path.join(self.save_dir, 'contours.png')
if os.path.exists(fname):
return
idx_per_type = []
for j in range(len(self.cell_types[:-2])):
if self.cell_types[j] != 'Unknown':
idx_per_type.append(np.where(self.rf_labels == j)[0])
plt.figure(figsize=(50, 12))
for ii, idx in enumerate(idx_per_type):
plt.subplot(1, len(idx_per_type), ii+1)
for unit in idx:
# also plot all in one plot
plotting_data = self.get_circle_plotting_data(unit, self.gaussian_fits)
plt.plot(plotting_data[0],plotting_data[1], 'k', alpha=0.4)
plt.xlim([0, self.stim_size[1]])
plt.ylim([0, self.stim_size[0]])
plt.title(self.cell_types[ii], fontsize=30)
plt.suptitle('RF Contours by Cell Types', fontsize=50)
plt.tight_layout(rect=[0, 0.03, 1, 0.9])
plt.savefig(fname, bbox_inches='tight', dpi=100)
plt.close()
def make_rf_plots(self):
fname = os.path.join(self.save_dir, 'all_rfs.png')
if os.path.exists(fname):
return
n_units = self.STAs.shape[0]
idx_per_type = []
for j in range(len(self.cell_types)):
idx_per_type.append(np.where(self.rf_labels == j)[0])
n_cols = 10
n_rows_per_type = []
for idx in idx_per_type:
n_rows_per_type.append(int(np.ceil(len(idx)/float(n_cols))))
n_rows = sum(n_rows_per_type)+9
self.fontsize = 20
fig=plt.figure(figsize=(3*n_cols, 3*n_rows))
gs = gridspec.GridSpec(n_rows, n_cols, fig,
left=0, right=1, top=0.95, bottom=0.05,
hspace=0.2, wspace=0)
row = 0
for ii, idx in enumerate(idx_per_type):
col = 0
# add label
ax = plt.subplot(gs[row, 0])
plt.text(0.5, 0.5, self.cell_types[ii],
horizontalalignment='center',
verticalalignment='center',
fontsize=60,
transform=ax.transAxes)
ax.set_axis_off()
row += 1
idx_sort = idx[np.argsort(self.ptps[idx])[::-1]]
for unit in idx_sort:
fr = str(np.round(self.f_rates[unit], 1))
ptp = str(np.round(self.ptps[unit], 1))
title = "Unit: {}\n{}Hz, {}SU".format(unit, fr, ptp)
gs = self.add_RF_plot(gs, row, col, unit, None, title)
if col == 9:
col = 0
row += 1
else:
col += 1
if col != 0:
row += 1
#plt.tight_layout()
#fig.savefig(fname)
fig.savefig(fname, bbox_inches='tight', dpi=100)
fig.clf()
plt.close('all')
def make_all_templates_summary_plots(self):
fname = os.path.join(self.save_dir, 'all_templates.png')
if os.path.exists(fname):
return
n_units = self.n_units
idx_per_type = []
for j in range(len(self.cell_types)):
idx_per_type.append(np.where(self.rf_labels == j)[0])
n_cols = 10
n_rows_per_type = []
for idx in idx_per_type:
n_rows_per_type.append(int(np.ceil(len(idx)/float(n_cols))))
n_rows = sum(n_rows_per_type)+9
self.fontsize = 20
fig=plt.figure(figsize=(3*n_cols, 3*n_rows))
gs = gridspec.GridSpec(n_rows, n_cols, fig,
left=0, right=1, top=0.95, bottom=0.05,
hspace=0.5, wspace=0)
row = 0
for ii, idx in enumerate(idx_per_type):
col = 0
# add label
ax = plt.subplot(gs[row, 0])
plt.text(0.5, 0.5, self.cell_types[ii],
horizontalalignment='center',
verticalalignment='center',
fontsize=60,
transform=ax.transAxes)
ax.set_axis_off()
row += 1
idx_sort = idx[np.argsort(self.ptps[idx])[::-1]]
for unit in idx_sort:
fr = str(np.round(self.f_rates[unit], 1))
ptp = str(np.round(self.ptps[unit], 1))
title = "Unit: {}\n{}Hz, {}SU".format(unit, fr, ptp)
gs = self.add_template_summary(
gs, row, col, unit, title=title,
add_color_bar=False, scale=8)
if col == 9:
col = 0
row += 1
else:
col += 1
if col != 0:
row += 1
#plt.tight_layout()
fig.savefig(fname, bbox_inches='tight', dpi=100)
fig.clf()
plt.close('all')
def make_all_rf_templates_plots(self):
idx_per_type = []
for j in range(len(self.cell_types)):
idx_per_type.append(np.where(self.rf_labels == j)[0])
n_cols = 10
self.fontsize = 20
for ii in range(len(self.cell_types)):
type_name = self.cell_types[ii]
fname = os.path.join(self.save_dir_ind,
'all_rf_templates_{}.png'.format(type_name))
if os.path.exists(fname):
continue
idx = idx_per_type[ii]
idx_sort = idx[np.argsort(self.ptps[idx])[::-1]]
n_rows = int(np.ceil(len(idx)/float(n_cols/2))) + 1
fig=plt.figure(figsize=(3*n_cols, 3*n_rows))
gs = gridspec.GridSpec(n_rows, n_cols, fig,
left=0, right=1, top=1, bottom=0.05,
hspace=0.5, wspace=0)
ax = plt.subplot(gs[0, n_cols//2])
plt.text(0.5, 0.5, type_name,
horizontalalignment='center',
verticalalignment='center',
fontsize=4*self.fontsize,
transform=ax.transAxes)
ax.set_axis_off()
row = 1
col = 0
for unit in idx_sort:
fr = str(np.round(self.f_rates[unit], 1))
ptp = str(np.round(self.ptps[unit], 1))
title = "Unit: {}, {}Hz, {}SU".format(unit, fr, ptp)
gs = self.add_template_summary(
gs, row, col, unit, title=title,
add_color_bar=False, scale=8)
col += 1
gs = self.add_RF_plot(gs, row, col, unit, None, None)
if col == n_cols-1:
col = 0
row += 1
else:
col += 1
#plt.tight_layout()
fig.savefig(fname, bbox_inches='tight', dpi=100)
fig.clf()
plt.close('all')
class CompareSpikeTrains(Visualizer):
def __init__(self, fname_templates, fname_spike_train, rf_dir,
fname_recording, recording_dtype,
fname_geometry, sampling_rate, save_dir,
fname_templates2=None, fname_spike_train2=None, rf_dir2=None,
fname_set1_idx=None):
        if fname_set1_idx is None and (fname_templates2 is None or fname_spike_train2 is None or rf_dir2 is None):
raise ValueError('something is not right!!')
# TODO: Finish it!!!
        if fname_set1_idx is None:
# saving directory location
tmp_dir = os.path.join(save_dir,'tmp')
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
templates1 = np.load(fname_templates)
spike_train1 = np.load(fname_spike_train)
templates2 = np.load(fname_templates2)
spike_train2 = np.load(fname_spike_train2)
STAs1 = np.load(os.path.join(rf_dir, 'STA_spatial.npy'))
STAs_temporal1 = np.load(os.path.join(rf_dir, 'STA_temporal.npy'))
Gaussian_params1 = np.load(os.path.join(rf_dir, 'gaussian_fits.npy'))
STAs2 = np.load(os.path.join(rf_dir2, 'STA_spatial.npy'))
STAs_temporal2 = np.load(os.path.join(rf_dir2, 'STA_temporal.npy'))
Gaussian_params2 = np.load(os.path.join(rf_dir2, 'gaussian_fits.npy'))
idx_single_rf1 = np.load(os.path.join(rf_dir, 'idx_single_rf.npy'))
idx_no_rf1 = np.load(os.path.join(rf_dir, 'idx_no_rf.npy'))
idx_multi_rf1 = np.load(os.path.join(rf_dir, 'idx_multi_rf.npy'))
rf_labels1 = np.load(os.path.join(rf_dir, 'labels.npy'))
idx_single_rf2 = np.load(os.path.join(rf_dir2, 'idx_single_rf.npy'))
idx_no_rf2 = np.load(os.path.join(rf_dir2, 'idx_no_rf.npy'))
idx_multi_rf2 = np.load(os.path.join(rf_dir2, 'idx_multi_rf.npy'))
rf_labels2 = np.load(os.path.join(rf_dir2, 'labels.npy'))
templates, spike_train = combine_two_spike_train(
templates1, templates2, spike_train1, spike_train2)
STAs, Gaussian_params = combine_two_rf(
STAs1, STAs2, Gaussian_params1, Gaussian_params2)
STAs_temporal = np.concatenate((STAs_temporal1, STAs_temporal2), axis=0)
K1 = templates1.shape[2]
K2 = templates2.shape[2]
set1_idx = np.zeros(K1+K2, 'bool')
set1_idx[:K1] = 1
idx_single_rf = np.hstack((idx_single_rf1, idx_single_rf2+K1))
idx_no_rf = np.hstack((idx_no_rf1, idx_no_rf2+K1))
idx_multi_rf = np.hstack((idx_multi_rf1, idx_multi_rf2+K1))
rf_labels = np.hstack((rf_labels1, rf_labels2))
fname_templates = os.path.join(tmp_dir, 'templates_combined.npy')
fname_spike_train = os.path.join(tmp_dir, 'spike_train_combined.npy')
rf_dir = tmp_dir
fname_set1_idx = os.path.join(tmp_dir, 'set1_idx.npy')
np.save(fname_templates, templates)
np.save(fname_spike_train, spike_train)
np.save(fname_set1_idx, set1_idx)
np.save(os.path.join(rf_dir, 'STA_spatial.npy'), STAs)
np.save(os.path.join(rf_dir, 'STA_temporal.npy'), STAs_temporal)
np.save(os.path.join(rf_dir, 'gaussian_fits.npy'), Gaussian_params)
np.save(os.path.join(rf_dir, 'idx_single_rf.npy'), idx_single_rf)
np.save(os.path.join(rf_dir, 'idx_no_rf.npy'), idx_no_rf)
np.save(os.path.join(rf_dir, 'idx_multi_rf.npy'), idx_multi_rf)
np.save(os.path.join(rf_dir, 'labels.npy'), rf_labels)
set1_idx = np.load(fname_set1_idx)
Visualizer.__init__(self, fname_templates, fname_spike_train,
fname_recording, recording_dtype,
fname_geometry, sampling_rate, save_dir, rf_dir)
self.set1_idx = set1_idx
# fix nearest units!
self.fix_nearest_units()
def fix_nearest_units(self):
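        # recompute nearest units so that each unit from set 1 is matched only
        # against units from set 2 (and vice versa), in both template space and
        # RF space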
templates1 = self.templates[:, :, self.set1_idx].transpose(2,0,1)
templates2 = self.templates[:, :, ~self.set1_idx].transpose(2,0,1)
STAs1 = self.STAs[self.set1_idx]
STAs2 = self.STAs[~self.set1_idx]
nearest_units1, nearest_units2 = compute_neighbours2(
templates1, templates2, self.n_neighbours)
nearest_units_rf1, nearest_units_rf2 = compute_neighbours_rf2(
STAs1, STAs2, self.n_neighbours)
set1_idx = np.where(self.set1_idx)[0]
set2_idx = np.where(~self.set1_idx)[0]
nearest_units = np.copy(self.nearest_units)
nearest_units_rf = np.copy(self.nearest_units_rf)
for k in range(self.n_units):
if np.any(set1_idx==k):
ii = np.where(set1_idx == k)[0]
temp = nearest_units1[ii]
nearest_units[k] = set2_idx[temp]
temp = nearest_units_rf1[ii]
nearest_units_rf[k] = set2_idx[temp]
else:
ii = np.where(set2_idx == k)[0]
temp = nearest_units2[ii]
nearest_units[k] = set1_idx[temp]
temp = nearest_units_rf2[ii]
nearest_units_rf[k] = set1_idx[temp]
self.nearest_units = nearest_units
self.nearest_units_rf = nearest_units_rf
def get_normalized_templates(templates, neigh_channels):
"""
plot normalized templates on their main channels and secondary channels
templates: number of channels x temporal window x number of units
geometry: number of channels x 2
"""
K, R, C = templates.shape
mc = np.argmax(templates.ptp(1), 1)
# get main channel templates
templates_mc = np.zeros((K, R))
for k in range(K):
templates_mc[k] = templates[k, :, mc[k]]
# shift templates_mc
best_shifts_mc = align_get_shifts_with_ref(
templates_mc)
templates_mc = shift_chans(templates_mc, best_shifts_mc)
ptp_mc = templates_mc.ptp(1)
# normalize templates
norm_mc = np.linalg.norm(templates_mc, axis=1, keepdims=True)
templates_mc /= norm_mc
    # get secondary channel templates
templates_sec = np.zeros((0, R))
best_shifts_sec = np.zeros(0)
unit_ids_sec = np.zeros((0), 'int32')
for k in range(K):
neighs = np.copy(neigh_channels[mc[k]])
neighs[mc[k]] = False
neighs = np.where(neighs)[0]
templates_sec = np.concatenate((templates_sec, templates[k, :, neighs]), axis=0)
best_shifts_sec = np.hstack((best_shifts_sec, np.repeat(best_shifts_mc[k], len(neighs))))
unit_ids_sec = np.hstack((unit_ids_sec, np.ones(len(neighs), 'int32')*k))
# shift templates_sec
best_shifts_sec = align_get_shifts_with_ref(
templates_sec)
templates_sec = shift_chans(templates_sec, best_shifts_sec)
ptp_sec = templates_sec.ptp(1)
# normalize templates
norm_sec = np.linalg.norm(templates_sec, axis=1, keepdims=True)
templates_sec /= norm_sec
return templates_mc, templates_sec, ptp_mc, ptp_sec, unit_ids_sec
def pca_denoise(data, pca_mean, pca_components):
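    # project the (centered) data onto the PCA components and reconstruct,
    # keeping only the variance captured by those components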
data_pca = np.matmul(data-pca_mean, pca_components.T)
return np.matmul(data_pca, pca_components)+pca_mean
| apache-2.0 |
bhargav/scikit-learn | examples/applications/svm_gui.py | 124 | 11251 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
try:
import tkinter as Tk
except ImportError:
# Backward compat for Python 2
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
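        # evaluate the classifier's decision function on a regular grid covering
        # the plotting area; Z is later drawn as contour lines or a filled surface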
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
ChenguangZhang/Python_CFD_Course | lesson_09.py | 1 | 1224 | #!/usr/bin/python
import time, sys
import numpy as np
import matplotlib.pyplot as plt
# About: solve the 2D Laplace equation (steady-state diffusion) by Jacobi iteration
# simulation parameters
lx = 2.0
ly = 1.0
nx = 40
ny = 20
dx = lx/nx
dy = ly/ny
nt = 100
# apply ghost cell technique
cellx = dx/2+dx * np.arange(nx) # coordinate of cell center
celly = dy/2+dy * np.arange(ny)
cellxx, cellyy = np.meshgrid(cellx,celly)
# initial condition
phi = np.zeros((nx+2,ny+2))
#phi[0.5/dx:1/dx+1, 0.5/dy:1/dy+1]=2
#
l1norm = []
phin = np.ones((nx+2,ny+2))
for n in range(2*nt):
for i in range(1,nx+1):
for j in range(1,ny+1):
#phin[i,j] = (dy*dy*(phi[i+1,j]+phi[i-1,j]) + dx*dx*(phi[i,j-1]+phi[i,j+1]))/(2*(dx*dx+dy*dy))
phin[i,j] = ((phi[i+1,j]+phi[i-1,j]) + (phi[i,j-1]+phi[i,j+1]))/4.0
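    # boundary conditions via the ghost cells: phi = 0 on the left and bottom,
    # phi = y (Dirichlet, imposed by reflection about the cell face) on the right,
    # and zero-gradient (Neumann) on the top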
phin[0,:] = 0
phin[-1,1:ny+1] = 2*celly - phin[-2,1:ny+1]
#phin[:,0] = phin[:,1]
phin[:,0] = 0
phin[:,-1] = phin[:,-2]
l1norm.append((np.sum(np.abs(phi[:])-np.abs(phin[:])))/np.sum(np.abs(phin[:])))
phin,phi = phi,phin
if n%5 == 0:
plt.clf()
plt.pcolormesh(cellxx,cellyy, phi[1:-1,1:-1].T)
plt.axis('equal')
plt.title('%03d'%n)
plt.savefig('%03d.png'%n)
| gpl-2.0 |
mardom/GalSim | devel/external/test_pse_corr.py | 1 | 6421 | import galsim
import numpy as np
import matplotlib.pyplot as plt
import subprocess
from scipy.special import jv
# This uses Mike Jarvis's corr2 program for calculating the correlation function.
# It is available at https://code.google.com/p/mjarvis/ and needs to be installed separately.
### set up basic parameters ###
# file containing theoretical P(k), with fake values added above ell=2000
pkfile = 'ps.wmap7lcdm.2000.dat'
theory_tab = galsim.LookupTable(file=pkfile, interpolant='linear')
# N for our grid used for estimating shears
grid_nx = 100
# length of grid in one dimension (degrees)
theta = 10. # degrees
dtheta = theta/grid_nx
extra_res = 10 # Extra resolution factor for g1,g2 grid.
# parameters for corr2:
min_sep = dtheta
max_sep = grid_nx * np.sqrt(2) * dtheta
nbins = 100
# Make deterministic
rng = galsim.BaseDeviate(1234)
# To save time debugging, use the existing corr files
use_saved = False
class xi_integrand:
def __init__(self, pk, r, n):
self.pk = pk
self.r = r
self.n = n
def __call__(self, k):
return k * self.pk(k) * jv(self.n, self.r*k)
def calculate_xi(r, pk, n):
"""Calculate xi+(r) or xi-(r) from a power spectrum.
"""
#print 'Start calculate_xi'
# xi+/-(r) = 1/2pi int(dk k P(k) J0/4(kr), k=0..inf)
int_min = pk.x_min
int_max = pk.x_max
rrad = r * np.pi/180. # Convert to radians
xi = np.zeros_like(r)
for i in range(len(r)):
integrand = xi_integrand(pk, rrad[i], n)
xi[i] = galsim.integ.int1d(integrand, int_min, int_max,
rel_err=1.e-6, abs_err=1.e-12)
xi /= 2. * np.pi
return xi
def doplot(r, t_xip, t_xim, t_xiket, xip, xim, xix, xik, xiket, xikex, pref):
fig = plt.figure()
ax = fig.add_subplot(111)
nonzero = (xip != 0.)
ax.plot(r, t_xip, 'black', label='Theory xi+')
ax.plot(r, t_xim, 'grey', label='Theory xi-')
ax.plot(r[nonzero], xip[nonzero], 'blue', label='Observed xi+')
ax.plot(r[nonzero], xim[nonzero], 'green', label='Observed xi-')
ax.plot(r[nonzero], xix[nonzero], 'red', label='Observed xix')
ax.plot(r, -t_xip, 'black', ls='dashed')
ax.plot(r, -t_xim, 'grey', ls='dashed')
ax.plot(r[nonzero], -xip[nonzero], 'blue', ls='dashed')
ax.plot(r[nonzero], -xim[nonzero], 'green', ls='dashed')
ax.plot(r[nonzero], -xix[nonzero], 'red', ls='dashed')
plt.ylim(1e-8,2e-5)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('r')
ax.set_ylabel(r'$\xi$')
ax.set_title('Shear-Shear Correlations')
plt.legend(loc='upper right')
figfile = pref + '_e2.jpg'
plt.savefig(figfile)
print 'Wrote to file ',figfile
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(r, t_xip, 'black', label='Theory xi_kappa')
ax.plot(r[nonzero], xik[nonzero], 'blue', label='Observed xi_kappa')
ax.plot(r, -t_xip, 'black', ls='dashed')
ax.plot(r[nonzero], -xik[nonzero], 'blue', ls='dashed')
plt.ylim(1e-8,2e-5)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('r')
ax.set_ylabel(r'$\xi$')
ax.set_title('Kappa-Kappa Correlations')
plt.legend(loc='upper right')
figfile = pref + '_k2.jpg'
plt.savefig(figfile)
print 'Wrote to file ',figfile
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(r, t_xiket, 'black', label='Theory <kappa gamma_t>')
ax.plot(r[nonzero], xiket[nonzero], 'blue', label='Observed <kappa gamma_t>')
ax.plot(r[nonzero], xikex[nonzero], 'red', label='Observed <kappa gamma_x>')
ax.plot(r, -t_xiket, 'black', ls='dashed')
ax.plot(r[nonzero], -xiket[nonzero], 'blue', ls='dashed')
ax.plot(r[nonzero], -xikex[nonzero], 'red', ls='dashed')
plt.ylim(1e-8,2e-5)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('r')
ax.set_ylabel(r'$\xi$')
ax.set_title('Kappa-Shear Correlations')
plt.legend(loc='upper right')
figfile = pref + '_ke.jpg'
plt.savefig(figfile)
print 'Wrote to file ',figfile
def run_corr2(x, y, g1, g2, k):
import pyfits
import os
# Use fits binary table for faster I/O. (Converting to/from strings is slow.)
assert x.shape == y.shape
assert x.shape == g1.shape
assert x.shape == g2.shape
assert x.shape == k.shape
x_col = pyfits.Column(name='x', format='1D', array=x.flatten() )
y_col = pyfits.Column(name='y', format='1D', array=y.flatten() )
g1_col = pyfits.Column(name='g1', format='1D', array=g1.flatten() )
g2_col = pyfits.Column(name='g2', format='1D', array=g2.flatten() )
k_col = pyfits.Column(name='k', format='1D', array=k.flatten() )
cols = pyfits.ColDefs([x_col, y_col, g1_col, g2_col, k_col])
table = pyfits.new_table(cols)
phdu = pyfits.PrimaryHDU()
hdus = pyfits.HDUList([phdu,table])
hdus.writeto('temp.fits',clobber=True)
subprocess.Popen(['corr2','corr2.params',
'e2_file_name=temp.e2', 'k2_file_name=temp.k2',
'min_sep=%f'%min_sep,'max_sep=%f'%max_sep,'nbins=%f'%nbins]).wait()
subprocess.Popen(['corr2','corr2.params',
'file_name2=temp.fits', 'ke_file_name=temp.ke',
'min_sep=%f'%min_sep,'max_sep=%f'%max_sep,'nbins=%f'%nbins]).wait()
os.remove('temp.fits')
if use_saved:
print 'Using existing temp.e2, temp.k2, temp.ke'
else:
print 'Build Gridded g1,g2,kappa'
test_ps=galsim.PowerSpectrum(e_power_function = theory_tab, units='radians')
g1, g2, k = test_ps.buildGrid(grid_spacing=dtheta, ngrid=grid_nx*extra_res,
rng=rng, units='degrees', get_convergence=True)
grid_range = dtheta * np.arange(grid_nx*extra_res)
x, y = np.meshgrid(grid_range, grid_range)
print 'Calculate correlations'
run_corr2(x,y,g1,g2,k)
e2 = np.loadtxt('temp.e2')
k2 = np.loadtxt('temp.k2')
ke = np.loadtxt('temp.ke')
#os.remove('temp.e2')
#os.remove('temp.k2')
#os.remove('temp.ke')
r = e2[:,1]
xip = e2[:,2]
xim = e2[:,3]
xix = e2[:,5]
w = e2[:,7]
xik = k2[:,2]
xiket = ke[:,2]
xikex = ke[:,3]
print "Convert between corr and ps"
theory_xip = calculate_xi(r,theory_tab,0)
theory_xim = calculate_xi(r,theory_tab,4)
theory_xiket = calculate_xi(r,theory_tab,2)
print "Making figures of dimensionless power, and writing to files"
doplot(r, theory_xip, theory_xim, theory_xiket, xip, xim, xix, xik, xiket, xikex, 'test_pse_corr')
| gpl-3.0 |
khalido/nd101 | gradient_descent.py | 1 | 2041 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Step 1 - collect our data
df = pd.read_csv('data/live_reg_data.csv', header=None)
print(df.head())
#collect data using numpy
points = np.genfromtxt('data/live_reg_data.csv', delimiter=',')
points[:5]
#lets see the data
plt.scatter(df[0], df[1])
plt.show()
#parameters
learning_rate = 0.0001
initial_b = 0
initial_m = 0
num_iterations = 1000
def compute_error_for_line_given_points(b, m , points):
totalError = 0 #initialize error at 0
for i in range(0, len(points)): #for every point
x = points[i, 0] #get x val
y = points[i, 1] #get y val
totalError += (y - (m*x + b)) **2
return totalError / float(len(points))
def gradient_descent_runner(points, starting_b, starting_m, learning_rate, num_iterations):
b = starting_b
m = starting_m
#gradient descent
for i in range(num_iterations):
#update b & m with new more accurate b and m
b, m = step_gradient(b, m, np.array(points), learning_rate)
return [b,m]
def step_gradient(b_current, m_current, points, learningRate):
b_gradient = 0
m_gradient = 0
N = float(len(points))
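# For the mean squared error E(b, m) = (1/N) * sum_i (y_i - (m*x_i + b))**2,
# the partial derivatives accumulated in the loop below are
# dE/db = -(2/N) * sum_i (y_i - (m*x_i + b))
# dE/dm = -(2/N) * sum_i x_i * (y_i - (m*x_i + b))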
for i in range(0, len(points)):
x = points[i, 0]
y = points[i, 1]
#direction with respect to b and m
#computing partial derivatives of our error function
b_gradient += -(2/N) * (y - ((m_current * x) + b_current))
m_gradient += -(2/N) * x * (y - ((m_current * x) + b_current))
new_b = b_current - (learningRate * b_gradient)
new_m = m_current - (learningRate * m_gradient)
return [new_b, new_m]
print('starting gradient descent at b = {0}, m = {1}, error = {2}'.format(initial_b, initial_m, compute_error_for_line_given_points(initial_b, initial_m, points)))
[b, m] = gradient_descent_runner(points, initial_b, initial_m, learning_rate, num_iterations)
print('after {0} iterations: b = {1}, m = {2}, error = {3}'.format(num_iterations, b, m, compute_error_for_line_given_points(b, m, points)))
| gpl-3.0 |
MatthieuGilson/EC_estimation | optimization_movie.py | 1 | 6652 | import os, sys
import numpy as np
import scipy.linalg as spl
import scipy.stats as stt
import matplotlib.pyplot as pp
import pickle as pkl
os.system('clear')
obj_dir = './'
res_dir = 'res_movie/'
if not os.path.exists(res_dir):
print 'create directory:', res_dir
os.makedirs(res_dir)
print obj_dir, ';', res_dir
graph_format = 'png'
##################
# fMRI time series
ts_emp = np.load('rest_movie_ts.npy')
n_sub = 22 # number of subjects
n_cond = 2 # rest and movie conditions
n_session = 2 # 2 sessions per condition
N = 66 # number of ROIs
T = 300 # number of TRs of the recording
# time shifts for FC: 0, 1 and 2 TR
v_tau = np.arange(3,dtype=float)
n_tau = v_tau.size
# FC = spatiotemporal covariance of BOLD signals (average of the 2 sessions)
FC_emp = np.zeros([n_sub,n_cond,n_tau,N,N])
for i_sub in range(n_sub):
for i_run in range(n_cond*n_session):
ts_emp[i_sub,i_run,:,:] -= np.outer(ts_emp[i_sub,i_run,:,:].mean(1),np.ones(T)) # center the time series
for i_sub in range(n_sub):
for i_cond in range(n_cond):
for i_session in range(n_session):
for i_tau in range(n_tau):
FC_emp[i_sub,i_cond,i_tau,:,:] += np.tensordot(ts_emp[i_sub,i_cond*2+i_session,:,0:T-n_tau+1],ts_emp[i_sub,i_cond*2+i_session,:,i_tau:T-n_tau+1+i_tau],axes=(1,1)) / float((T-n_tau)*n_session)
FC_emp *= 0.5/FC_emp[:,0,0,:,:].mean()
print 'max FC value (most of the distribution should be between 0 and 1):', FC_emp.mean()
# time constant for BOLD autocovariances
slopes = np.zeros([n_sub,n_cond,N])
for i_sub in range(n_sub):
for i_cond in range(n_cond):
for i in range(N):
ac_tmp = np.maximum(FC_emp[i_sub,i_cond,:,i,i],1e-10) # autocovariance for time shifts in v_tau; with lower bound to avoid negative values (cf. log)
slopes[i_sub,i_cond,i] = np.polyfit(v_tau,np.log(ac_tmp),1)[0] # slope of autocovariance for ROI i
tau_x = -1./slopes.mean(2) # inverse of negative slope of autocovariance
#################
# structural data
SC_anat = np.load(obj_dir+'SC_anat.npy')
lim_SC = 0. # limit DTI value to determine SC (only connections with larger values are tuned)
# mask for existing connections for EC
mask_EC = np.zeros([N,N],dtype=bool) # EC weights to tune
mask_EC[SC_anat>lim_SC] = True
for i in range(N):
mask_EC[i,i] = False # no self connection
mask_EC[i,N-1-i] = False # additional interhemispheric connections
print 'EC density:', mask_EC.sum()/float(N*(N-1))
# diagonal mask for input noise matrix (here, no input cross-correlation)
mask_Sigma = np.eye(N,dtype=bool)
##############
# optimization
# optimization rates (to avoid explosion of activity, Sigma is tuned quicker)
epsilon_EC = 0.0005
epsilon_Sigma = 0.05
min_val_EC = 0. # minimal value for tuned EC elements
max_val_EC = 1. # maximal value for tuned EC elements
min_val_Sigma = 0. # minimal value for tuned Sigma elements
i_tau = 1 # time shift for optimization (in TR; can be 1 or 2)
tau = v_tau[i_tau]
print 'opt with time shift', tau, 'TR'
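# Sketch of the relations used in the loop below: for a linear model with
# Jacobian J and input covariance Sigma, the zero-lag covariance FC0 solves
# the Lyapunov equation J*FC0 + FC0*J.T + Sigma = 0 and the lagged covariance
# is FCtau = FC0*expm(J.T*tau); the optimization inverts these relations by
# gradient descent on EC (entering J) and Sigma.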
EC_mod = np.zeros([n_sub,n_cond,N,N])
Sigma_mod = np.zeros([n_sub,n_cond,N,N])
FC0_mod = np.zeros([n_sub,n_cond,N,N])
FCtau_mod = np.zeros([n_sub,n_cond,N,N])
for i_sub in range(n_sub):
for i_cond in range(n_cond):
print
print 'sub', i_sub, '; cond', i_cond
# initial EC
EC = np.zeros([N,N]) # initial connectivity
Sigma = np.eye(N) # initial noise
# record best fit (matrix distance between model and empirical FC)
best_dist = 1e10
# objective FC matrices (empirical)
FC0_obj = FC_emp[i_sub,i_cond,0,:,:]
FCtau_obj = FC_emp[i_sub,i_cond,i_tau,:,:]
stop_opt = False
i_opt = 0
while not stop_opt:
# calculate Jacobian of dynamical system
J = -np.eye(N)/tau_x[i_sub,:].mean() + EC
# calculate FC0 and FCtau for model
FC0 = spl.solve_lyapunov(J,-Sigma)
FCtau = np.dot(FC0,spl.expm(J.T*tau))
# matrices of model error
Delta_FC0 = FC0_obj-FC0
Delta_FCtau = FCtau_obj-FCtau
# calculate error between model and empirical data for FC0 and FC_tau (matrix distance)
dist_FC_tmp = 0.5*(np.sqrt((Delta_FC0**2).sum()/(FC0_obj**2).sum())+np.sqrt((Delta_FCtau**2).sum()/(FCtau_obj**2).sum()))
# calculate Pearson correlation between model and empirical data for FC0 and FC_tau
Pearson_FC_tmp = 0.5*(stt.pearsonr(FC0.reshape(-1),FC0_obj.reshape(-1))[0]+stt.pearsonr(FCtau.reshape(-1),FCtau_obj.reshape(-1))[0])
# record best model parameters
if dist_FC_tmp<best_dist:
best_dist = dist_FC_tmp
best_Pearson = Pearson_FC_tmp
i_best = i_opt
EC_mod_tmp = np.array(EC)
Sigma_mod_tmp = np.array(Sigma)
FC0_mod_tmp = np.array(FC0)
FCtau_mod_tmp = np.array(FCtau)
else:
stop_opt = i_opt>100
# Jacobian update
Delta_J = np.dot(np.linalg.pinv(FC0),Delta_FC0+np.dot(Delta_FCtau,spl.expm(-J.T*tau))).T/tau
# update EC (recurrent connectivity)
EC[mask_EC] += epsilon_EC * Delta_J[mask_EC]
EC[mask_EC] = np.clip(EC[mask_EC],min_val_EC,max_val_EC)
# update Sigma (input variances)
Delta_Sigma = -np.dot(J,Delta_FC0)-np.dot(Delta_FC0,J.T)
Sigma[mask_Sigma] += epsilon_Sigma * Delta_Sigma[mask_Sigma]
Sigma[mask_Sigma] = np.maximum(Sigma[mask_Sigma],min_val_Sigma)
# check for stop
if not stop_opt:
if (i_opt)%50==0:
print 'opt step:', i_opt
print 'dist FC:', dist_FC_tmp, '; Pearson FC:', Pearson_FC_tmp
i_opt += 1
else:
print 'stop at step', i_opt, 'with best FC dist:', best_dist, '; best FC Pearson:', best_Pearson
EC_mod[i_sub,i_cond,:,:] = EC_mod_tmp
Sigma_mod[i_sub,i_cond,:,:] = Sigma_mod_tmp
FC0_mod[i_sub,i_cond,:,:] = FC0_mod_tmp
FCtau_mod[i_sub,i_cond,:,:] = FCtau_mod_tmp
# save results
np.save(res_dir+'FC_emp.npy',FC_emp) # empirical spatiotemporal FC
np.save(res_dir+'mask_EC.npy',mask_EC) # mask of optimized connections
np.save(res_dir+'mask_Sigma.npy',mask_Sigma) # mask of optimized Sigma elements
np.save(res_dir+'EC_mod.npy',EC_mod) # estimated EC matrices
np.save(res_dir+'Sigma_mod.npy',Sigma_mod) # estimated Sigma matrices
np.save(res_dir+'FC0_mod.npy',FC0_mod) # model FC0
np.save(res_dir+'FCtau_mod.npy',FCtau_mod) # model FCtau (tau = 1 or 2 TR)
# various compiled results (if needed)
if False:
np.save(res_dir+'mean_FC0_rest.npy',FC_emp[:,0,0,:,:].mean(0))
np.save(res_dir+'mean_FC1_rest.npy',FC_emp[:,0,1,:,:].mean(0))
np.save(res_dir+'mean_EC_rest.npy',EC_mod[:,0,:,:].mean(0))
np.save(res_dir+'mean_Sigma_rest.npy',Sigma_mod[:,0,:,:].mean(0))
np.save(res_dir+'mean_FC0_movie.npy',FC_emp[:,1,0,:,:].mean(0))
np.save(res_dir+'mean_FC1_movie.npy',FC_emp[:,1,1,:,:].mean(0))
np.save(res_dir+'mean_EC_movie.npy',EC_mod[:,1,:,:].mean(0))
np.save(res_dir+'mean_Sigma_movie.npy',Sigma_mod[:,1,:,:].mean(0))
| gpl-3.0 |
dborzov/fredholm | fredholm/deamon.py | 1 | 4655 | import pickle, trivia
from config import *
import matplotlib.pyplot as pyplot
values=pickle.load(open( REPORT_FILE_FOLDER_NAME+PICKLE_FILENAME, "rb" ))
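# plot_the_region: plot the chosen quantity (dic_key, e.g. 'G3' or 'g3')
# against the energy -3 eta for every parameter set in PARAMETER_CHOICE,
# restricted to the given energy range.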
def plot_the_region(energy_value_range, dic_key):
pyplot.figure()
# plot_b=figure_b.add_subplot(111)
pyplot.ylabel('G(-3 eta|0)')
pyplot.xlabel('-3 eta')
pyplot.axhline(0.,color='k')
pyplot.axvline(1.,color='k')
pyplot.axvline(1.2704,color='k')
pyplot.axvline(16.522,color='k')
#pyplot.xscale('log')
for parameter_set in PARAMETER_CHOICE:
x=[record['E'] for record in values if trivia.satisfies(record,parameter_set)]
y=[record[dic_key] for record in values if trivia.satisfies(record,parameter_set)]
pyplot.plot(x,y,MARKER_NOTATION[dic_key],color=parameter_set['color'], markersize=6)
pyplot.xlim(energy_value_range)
header=open(REPORT_FILE_FOLDER_NAME+'static/header-summary.html',"r")
footer=open(REPORT_FILE_FOLDER_NAME+'static/footer.html',"r")
# summary file
report_file=open(REPORT_FILE_FOLDER_NAME+'summary/summary.html',"w")
report_file.writelines(header.readlines())
header.close()
report_file.write("<h2>"+str(len(values))+" records found in "+REPORT_FILE_FOLDER_NAME+PICKLE_FILENAME+" database file</h2>")
report_file.write("<table class='table table-bordered'><thead><tr>"
"<th>Name</th>"
"<th>dx</th>"
"<th>L</th>"
"<th>G(-3mu|0)</th>"
"<th>Iteration class</th>"
"</tr><tbody>")
for i,parameter_set in enumerate(PARAMETER_CHOICE):
report_file.write("<td bgcolor='"+parameter_set['color']+
"'><center><font color='#ffffff'>"+
parameter_set['name']+"</td><td>"+
str(parameter_set['dx'])+
"</td><td>"+str(parameter_set['L'])+
"</td><td>"+str(round(parameter_set['L']/parameter_set['dx'],0))+
"</td><td>"+str(parameter_set['iteration'])+
"</td></tr>")
report_file.write("</tbody></table><br><br>")
report_file.writelines(footer.readlines())
report_file.close()
# table of all database records
table_file=open(REPORT_FILE_FOLDER_NAME+'summary/table.html',"w")
header=open(REPORT_FILE_FOLDER_NAME+'static/header-table.html',"r")
table_file.writelines(header.readlines())
table_file.write("<table class='table table-bordered'>"
"<thead><tr>"
"<th>-3mu</th>"
"<th>dx</th>"
"<th>L</th>"
"<th>G(-3mu|0)</th>"
"<th>Computation time (seconds)</th>"
"<th>Iteration class</th>"
"</tr><tbody>")
for database_record in values:
if database_record['report_filename']!='':
table_file.write("<tr><td><h2><a href='../"+
str(database_record['report_filename'])+
"'>"+str(database_record['E'])+
"</a></td><td>"+str(database_record['dx'])+
"</td><td>"+str(database_record['L'])+
"</td><td>"+str(database_record['G3'])+
"</td><td>"+str(database_record['time'])+
"</td><td>"+str(database_record['iteration'])+
"</td></tr>")
else:
table_file.write("<tr><td>"+str(database_record['E'])+
"</td><td>"+str(database_record['dx'])+
"</td><td>"+str(database_record['L'])+
"</td><td>"+str(database_record['G3'])+
"</td><td>"+str(database_record['time'])+
"</td><td>"+str(database_record['iteration'])+
"</td></tr>")
table_file.write("</tbody></table><br><br>")
table_file.close()
plot_the_region([0.2,20.],'G3')
pyplot.xscale('log')
pyplot.ylim(-3.,1.)
pyplot.savefig(REPORT_FILE_FOLDER_NAME+'summary/summary.png')
pyplot.close()
plot_the_region([0.2,20.],'g3')
pyplot.ylim(-50.,30.)
pyplot.xscale('log')
pyplot.savefig(REPORT_FILE_FOLDER_NAME+'summary/summary-g3.png')
pyplot.close()
plot_the_region([5.0,18.0],'G3')
pyplot.ylim(-2.,2.)
pyplot.savefig(REPORT_FILE_FOLDER_NAME+'summary/b3-1.png')
pyplot.close()
plot_the_region([1.0,1.54],'G3')
pyplot.savefig(REPORT_FILE_FOLDER_NAME+'summary/b3-2.png')
pyplot.close()
plot_the_region([0.2,1.3],'G3')
pyplot.ylim(-1.,5.)
pyplot.savefig(REPORT_FILE_FOLDER_NAME+'summary/b2.png')
pyplot.close()
plot_the_region([0.0,1.1],'G3')
pyplot.savefig(REPORT_FILE_FOLDER_NAME+'summary/principal-value.png')
pyplot.close()
| mit |
efiring/numpy-work | numpy/fft/fftpack.py | 2 | 39065 | """
Discrete Fourier Transforms - FFT.py
The underlying code for these functions is an f2c translated and modified
version of the FFTPACK routines.
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
"""
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
from helper import *
_fft_cache = {}
_real_fft_cache = {}
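# _raw_fft is the shared driver for the 1-d transforms: it caches the FFTPACK
# work array per transform length n, crops or zero-pads the input along `axis`
# to length n, swaps that axis to the end, applies `work_function` and swaps
# the axes back.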
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
wsave = fft_cache[n]
except(KeyError):
wsave = init_function(n)
fft_cache[n] = wsave
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
return r
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT) algorithm. [CT]
Parameters
----------
a : array_like
Input array, can be complex
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axis` is larger than the last axis of `a`
See Also
--------
numpy.fft : for definition of the DFT and conventions used
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> from numpy import arange, pi, exp
>>> from numpy.fft import fft
>>> fft(exp(2j*pi*arange(8)/8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> from numpy.fft import fft, fftfreq
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = fft(np.sin(t))
>>> freq = fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
`ifft(fft(a)) == a` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., `a[0]` should contain the zero frequency term,
`a[1:n/2+1]` should contain the positive-frequency terms, and
`a[n/2+1:]` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axis` is larger than the last axis of `a`
See Also
--------
numpy.fft : An introduction, with definitions and general explanations
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT
ifftn : The n-dimensional inverse FFT
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> from numpy.fft import ifft
>>> ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
>>> from numpy import exp, pi, arange, zeros
>>> import matplotlib.pyplot as plt
>>> t = arange(400)
>>> n = zeros((400,), dtype=complex)
>>> n[40:60] = exp(1j*np.random.uniform(0, 2*pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
>>> plt.legend(('real', 'imaginary'))
>>> plt.show()
Creates and plots a band-limited signal with random phases.
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n/2+1`.
Raises
------
IndexError
if `axis` is larger than the last axis of `a`
See Also
--------
numpy.fft : for definition of the DFT and conventions used
irfft : The inverse of `rfft`
fft : The one-dimensional FFT of general (complex) input
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermite-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore `n/2+1`.
When `A = rfft(a)`, `A[0]` contains the zero-frequency term, which must be
purely real due to the Hermite symmetry.
If `n` is even, `A[-1]` contains the term for frequencies `n/2` and `-n/2`,
and must also be purely real. If `n` is odd, `A[-1]` contains the term
for frequency `A[(n-1)/2]`, and is complex in the general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> from numpy.fft import fft, rfft
>>> fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the nonnegative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, `irfft(rfft(a), len(a)) == a` to within numerical accuracy.
(See Notes below for why `len(a)` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermite-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
Input array
n : int, optional
Length of the transformed axis of the output.
For `n` output points, `n/2+1` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input (along the axis specified by `axis`)
as explained below.
axis : int, optional
Axis over which to compute the inverse FFT.
Returns
-------
out : real ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
`2*(m-1)` where `m` is the length of the transformed axis of the input.
To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
if `axis` is larger than the last axis of `a`
See Also
--------
numpy.fft : for definition of the DFT and conventions used
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the nonnegative frequency terms of a
Hermite-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
`a_resamp = irfft(rfft(a), m)`.
Examples
--------
>>> from numpy.fft import ifft, irfft
>>> ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""
Compute the fft of a signal which spectrum has Hermitian symmetry.
Parameters
----------
a : array
input array
n : int
length of the hfft
axis : int
axis over which to compute the hfft
See also
--------
rfft
ihfft
Notes
-----
These are a pair analogous to rfft/irfft, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's hermite_fft for which
you must supply the length of the result if it is to be odd.
ihfft(hfft(a), len(a)) == a
within numerical accuracy.
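Examples
--------
>>> from numpy.fft import hfft
>>> hfft([1, 2, 3])  # implicit n = 2*(3 - 1) = 4
array([ 8., -2.,  0., -2.])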
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse fft of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the ihfft.
axis : int, optional
Axis over which to compute the ihfft.
See also
--------
rfft, hfft
Notes
-----
These are a pair analogous to rfft/irfft, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it is `hfft` for which
you must supply the length of the result if it is to be odd.
ihfft(hfft(a), len(a)) == a
within numerical accuracy.
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
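# _cook_nd_args normalises the (s, axes) arguments of the n-d transforms: if
# `s` is omitted it is taken from the shape of `a` along `axes`, and for the
# inverse real transforms (invreal=1) the implicit length of the last axis is
# reconstructed as 2*(m-1).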
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError, "Shape and axes have different lengths."
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = range(len(axes))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
return a
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last `len(s)`
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
if `s` and `axes` have different length.
IndexError
if an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> from numpy import mgrid
>>> from numpy.fft import fftn
>>> a = mgrid[:3,:3,:3][0]
>>> fftn(a, axes=(1,2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> fftn(a, (2,2), axes=(0,1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> from numpy import meshgrid, pi, arange, sin, cos, log, abs
>>> from numpy.fft import fftn, fftshift
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2*pi*arange(200)/12, 2*pi*arange(200)/34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(fftshift(FS))**2))
>>> plt.show()
"""
return _raw_fftnd(a,s,axes,fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
`ifftn(fftn(a)) == a` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `ifft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on ifft zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last `len(s)`
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
if `s` and `axes` have different length.
IndexError
if an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : undoes `fftshift`, shifts zero-frequency terms to beginning
of array
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> from numpy import eye
>>> from numpy.fft import ifftn, fftn
>>> a = eye(4)
>>> ifftn(fftn(a, axes=(0,)),axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
>>> from numpy import zeros, exp
>>> from numpy.random import uniform
>>> from numpy.fft import ifftn
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80,20:40] = exp(1j*uniform(0, 2*pi, (20,20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
>>> plt.show()
Creates and plots an image with band-limited frequency content
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last 2
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
if `s` and `axes` have different length, or
`axes` not given and `len(s) != 2`
IndexError
if an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT
fft : The one-dimensional FFT
fftn : The *n*-dimensional FFT
fftshift : shifts zero-frequency terms to centre of array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> from numpy import mgrid
>>> from numpy.fft import fft2
>>> a = mgrid[:5, :5][0]
>>> fft2(a)
array([[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 5.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 10.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 15.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 20.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a,s,axes,fft)
def ifft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, `ifft2(fft2(a)) == a`
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each axis) of the output (`s[0]` refers to axis 0,
`s[1]` to axis 1, etc.). This corresponds to `n` for `ifft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on ifft zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last 2
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
if `s` and `axes` have different length, or
`axes` not given and `len(s) != 2`
IndexError
if an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> from numpy import eye
>>> from numpy.fft import ifft2
>>> a = 4*eye(4)
>>> ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
The final element of `s` corresponds to `n` for `rfft(x, n)`, while
for the remaining axes, it corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last `len(s)`
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be `s[-1]//2+1`,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
if `s` and `axes` have different length.
IndexError
if an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> from numpy import ones
>>> from numpy.fft import rfftn
>>> a = ones((3,3,3))
>>> rfftn(a)
array([[[ 27.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> rfftn(a, axes=(2,0))
array([[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 9.+0.j, 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
def rfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional fft of a real array.
Parameters
----------
a : array (real)
input array
s : sequence (int)
shape of the fft
axes : sequence (int)
axes over which to compute the fft
Notes
-----
The 2-D fft of the real valued array a. This is really just rfftn with
different default behavior.
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, `irfftn(rfftn(a), a.shape) == a` to within numerical accuracy.
(The `a.shape` is necessary like `len(a)` is for `irfft`, and for the same
reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where `s[-1]//2+1` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. if `s` is not given, the shape of the input (along the
axes specified by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : real ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is `2*(m-1)` where `m` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
if `s` and `axes` have different length.
IndexError
if an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> from numpy import zeros
>>> from numpy.fft import irfftn
>>> a = zeros((4,4,3)); a[0,0,0] = 64;
>>> irfftn(a)
array([[[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]],
[[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]],
[[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]],
[[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
def irfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse fft of a real array.
Parameters
----------
a : array (real)
input array
s : sequence (int)
shape of the inverse fft
axes : sequence (int)
axes over which to compute the inverse fft
Notes
-----
This is really irfftn with different default.
"""
return irfftn(a, s, axes)
# Deprecated names
from numpy import deprecate
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
| bsd-3-clause |
bnaul/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 16 | 6304 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import itertools
import numpy as np
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_warns_message
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
assert_raise_message(ValueError, 'Expected 2D array, got 1D array instead',
fast_mcd, X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
assert_raise_message(ValueError, 'Expected 2D array, got 1D array instead',
mcd.fit, X)
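# Helper for test_mcd: fit MinCovDet on random normal data in which n_outliers
# samples have been shifted, then check that the errors on the location, the
# covariance and the support (with respect to the pure inliers) stay within
# the given tolerances.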
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_mcd_issue3367():
# Check that MCD completes when the covariance matrix is singular
# i.e. one of the rows and columns are all zeros
rand_gen = np.random.RandomState(0)
# Think of these as the values for X and Y -> 10 values between -5 and 5
data_values = np.linspace(-5, 5, 10).tolist()
# Get the cartesian product of all possible coordinate pairs from above set
data = np.array(list(itertools.product(data_values, data_values)))
# Add a third column that's all zeros to make our data a set of point
# within a plane, which means that the covariance matrix will be singular
data = np.hstack((data, np.zeros((data.shape[0], 1))))
# The below line of code should raise an exception if the covariance matrix
# is singular. As a further test, since we have points in XYZ, the
# principal components (Eigenvectors) of these directly relate to the
# geometry of the points. Since it's a plane, we should be able to test
# that the Eigenvector that corresponds to the smallest Eigenvalue is the
# plane normal, specifically [0, 0, 1], since everything is in the XY plane
# (as I've set it up above). To do this one would start by:
#
# evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
# normal = evecs[:, np.argmin(evals)]
#
# After which we need to assert that our `normal` is equal to [0, 0, 1].
# Do note that there is floating point error associated with this, so it's
# best to subtract the two and then compare some small tolerance (e.g.
# 1e-12).
MinCovDet(random_state=rand_gen).fit(data)
def test_mcd_support_covariance_is_zero():
# Check that MCD returns a ValueError with informative message when the
# covariance of the support data is equal to 0.
X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1])
X_1 = X_1.reshape(-1, 1)
X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3])
X_2 = X_2.reshape(-1, 1)
msg = ('The covariance matrix of the support data is equal to 0, try to '
'increase support_fraction')
for X in [X_1, X_2]:
assert_raise_message(ValueError, msg, MinCovDet().fit, X)
def test_mcd_increasing_det_warning():
# Check that a warning is raised if we observe increasing determinants
# during the c_step. In theory the sequence of determinants should be
# decreasing. Increasing determinants are likely due to ill-conditioned
# covariance matrices that result in poor precision matrices.
X = [[5.1, 3.5, 1.4, 0.2],
[4.9, 3.0, 1.4, 0.2],
[4.7, 3.2, 1.3, 0.2],
[4.6, 3.1, 1.5, 0.2],
[5.0, 3.6, 1.4, 0.2],
[4.6, 3.4, 1.4, 0.3],
[5.0, 3.4, 1.5, 0.2],
[4.4, 2.9, 1.4, 0.2],
[4.9, 3.1, 1.5, 0.1],
[5.4, 3.7, 1.5, 0.2],
[4.8, 3.4, 1.6, 0.2],
[4.8, 3.0, 1.4, 0.1],
[4.3, 3.0, 1.1, 0.1],
[5.1, 3.5, 1.4, 0.3],
[5.7, 3.8, 1.7, 0.3],
[5.4, 3.4, 1.7, 0.2],
[4.6, 3.6, 1.0, 0.2],
[5.0, 3.0, 1.6, 0.2],
[5.2, 3.5, 1.5, 0.2]]
mcd = MinCovDet(random_state=1)
assert_warns_message(RuntimeWarning,
"Determinant has increased",
mcd.fit, X)
| bsd-3-clause |
Menooker/gem5_pcm | util/stats/barchart.py | 90 | 12472 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Lisa Hsu
import matplotlib, pylab
from matplotlib.font_manager import FontProperties
from matplotlib.numerix import array, arange, reshape, shape, transpose, zeros
from matplotlib.numerix import Float
from matplotlib.ticker import NullLocator
matplotlib.interactive(False)
from chart import ChartOptions
class BarChart(ChartOptions):
def __init__(self, default=None, **kwargs):
super(BarChart, self).__init__(default, **kwargs)
self.inputdata = None
self.chartdata = None
self.inputerr = None
self.charterr = None
def gen_colors(self, count):
cmap = matplotlib.cm.get_cmap(self.colormap)
if count == 1:
return cmap([ 0.5 ])
if count < 5:
return cmap(arange(5) / float(4))[:count]
return cmap(arange(count) / float(count - 1))
    # The input data format does not match the data format that the
    # graph function takes, because the input format is meant to be
    # intuitive. The conversion
# from input data format to chart data format depends on the
# dimensionality of the input data. Check here for the
# dimensionality and correctness of the input data
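    # As a concrete example of the conversion implemented below: a 1d input
    # of length n becomes chartdata of shape (1, 1, n); a 2d input of shape
    # (n_groups, n_bars) becomes (n_bars, 1, n_groups); and a 3d input of
    # shape (n_groups, n_bars, n_stacked) becomes (n_bars, n_stacked,
    # n_groups).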
def set_data(self, data):
if data is None:
self.inputdata = None
self.chartdata = None
return
data = array(data)
dim = len(shape(data))
if dim not in (1, 2, 3):
raise AttributeError, "Input data must be a 1, 2, or 3d matrix"
self.inputdata = data
# If the input data is a 1d matrix, then it describes a
# standard bar chart.
if dim == 1:
self.chartdata = array([[data]])
# If the input data is a 2d matrix, then it describes a bar
# chart with groups. The matrix being an array of groups of
# bars.
if dim == 2:
self.chartdata = transpose([data], axes=(2,0,1))
# If the input data is a 3d matrix, then it describes an array
# of groups of bars with each bar being an array of stacked
# values.
if dim == 3:
self.chartdata = transpose(data, axes=(1,2,0))
def get_data(self):
return self.inputdata
data = property(get_data, set_data)
def set_err(self, err):
if err is None:
self.inputerr = None
self.charterr = None
return
err = array(err)
dim = len(shape(err))
if dim not in (1, 2, 3):
raise AttributeError, "Input err must be a 1, 2, or 3d matrix"
self.inputerr = err
if dim == 1:
self.charterr = array([[err]])
if dim == 2:
self.charterr = transpose([err], axes=(2,0,1))
if dim == 3:
self.charterr = transpose(err, axes=(1,2,0))
def get_err(self):
return self.inputerr
err = property(get_err, set_err)
# Graph the chart data.
# Input is a 3d matrix that describes a plot that has multiple
# groups, multiple bars in each group, and multiple values stacked
# in each bar. The underlying bar() function expects a sequence of
# bars in the same stack location and same group location, so the
# organization of the matrix is that the inner most sequence
# represents one of these bar groups, then those are grouped
# together to make one full stack of bars in each group, and then
# the outer most layer describes the groups. Here is an example
# data set and how it gets plotted as a result.
#
# e.g. data = [[[10,11,12], [13,14,15], [16,17,18], [19,20,21]],
# [[22,23,24], [25,26,27], [28,29,30], [31,32,33]]]
#
# will plot like this:
#
# 19 31 20 32 21 33
# 16 28 17 29 18 30
# 13 25 14 26 15 27
# 10 22 11 23 12 24
#
    # Because this arrangement is rather counterintuitive, the rearrange
    # function takes various matrices and arranges them to fit this
# profile.
#
# This code deals with one of the dimensions in the matrix being
# one wide.
#
def graph(self):
if self.chartdata is None:
raise AttributeError, "Data not set for bar chart!"
dim = len(shape(self.inputdata))
cshape = shape(self.chartdata)
if self.charterr is not None and shape(self.charterr) != cshape:
raise AttributeError, 'Dimensions of error and data do not match'
if dim == 1:
colors = self.gen_colors(cshape[2])
colors = [ [ colors ] * cshape[1] ] * cshape[0]
if dim == 2:
colors = self.gen_colors(cshape[0])
colors = [ [ [ c ] * cshape[2] ] * cshape[1] for c in colors ]
if dim == 3:
colors = self.gen_colors(cshape[1])
colors = [ [ [ c ] * cshape[2] for c in colors ] ] * cshape[0]
colors = array(colors)
self.figure = pylab.figure(figsize=self.chart_size)
outer_axes = None
inner_axes = None
if self.xsubticks is not None:
color = self.figure.get_facecolor()
self.metaaxes = self.figure.add_axes(self.figure_size,
axisbg=color, frameon=False)
for tick in self.metaaxes.xaxis.majorTicks:
tick.tick1On = False
tick.tick2On = False
self.metaaxes.set_yticklabels([])
self.metaaxes.set_yticks([])
size = [0] * 4
size[0] = self.figure_size[0]
size[1] = self.figure_size[1] + .12
size[2] = self.figure_size[2]
size[3] = self.figure_size[3] - .12
self.axes = self.figure.add_axes(size)
outer_axes = self.metaaxes
inner_axes = self.axes
else:
self.axes = self.figure.add_axes(self.figure_size)
outer_axes = self.axes
inner_axes = self.axes
bars_in_group = len(self.chartdata)
width = 1.0 / ( bars_in_group + 1)
center = width / 2
bars = []
for i,stackdata in enumerate(self.chartdata):
bottom = array([0.0] * len(stackdata[0]), Float)
stack = []
for j,bardata in enumerate(stackdata):
bardata = array(bardata)
ind = arange(len(bardata)) + i * width + center
yerr = None
if self.charterr is not None:
yerr = self.charterr[i][j]
bar = self.axes.bar(ind, bardata, width, bottom=bottom,
color=colors[i][j], yerr=yerr)
if self.xsubticks is not None:
self.metaaxes.bar(ind, [0] * len(bardata), width)
stack.append(bar)
bottom += bardata
bars.append(stack)
if self.xlabel is not None:
outer_axes.set_xlabel(self.xlabel)
if self.ylabel is not None:
inner_axes.set_ylabel(self.ylabel)
if self.yticks is not None:
ymin, ymax = self.axes.get_ylim()
nticks = float(len(self.yticks))
ticks = arange(nticks) / (nticks - 1) * (ymax - ymin) + ymin
inner_axes.set_yticks(ticks)
inner_axes.set_yticklabels(self.yticks)
elif self.ylim is not None:
inner_axes.set_ylim(self.ylim)
if self.xticks is not None:
outer_axes.set_xticks(arange(cshape[2]) + .5)
outer_axes.set_xticklabels(self.xticks)
if self.xsubticks is not None:
numticks = (cshape[0] + 1) * cshape[2]
inner_axes.set_xticks(arange(numticks) * width + 2 * center)
xsubticks = list(self.xsubticks) + [ '' ]
inner_axes.set_xticklabels(xsubticks * cshape[2], fontsize=7,
rotation=30)
if self.legend is not None:
if dim == 1:
lbars = bars[0][0]
if dim == 2:
lbars = [ bars[i][0][0] for i in xrange(len(bars))]
if dim == 3:
number = len(bars[0])
lbars = [ bars[0][number - j - 1][0] for j in xrange(number)]
if self.fig_legend:
self.figure.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
else:
self.axes.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
if self.title is not None:
self.axes.set_title(self.title)
def savefig(self, name):
self.figure.savefig(name)
def savecsv(self, name):
f = file(name, 'w')
data = array(self.inputdata)
dim = len(data.shape)
if dim == 1:
#if self.xlabel:
# f.write(', '.join(list(self.xlabel)) + '\n')
f.write(', '.join([ '%f' % val for val in data]) + '\n')
if dim == 2:
#if self.xlabel:
# f.write(', '.join([''] + list(self.xlabel)) + '\n')
for i,row in enumerate(data):
ylabel = []
#if self.ylabel:
# ylabel = [ self.ylabel[i] ]
f.write(', '.join(ylabel + [ '%f' % v for v in row]) + '\n')
if dim == 3:
f.write("don't do 3D csv files\n")
pass
f.close()
if __name__ == '__main__':
from random import randrange
import random, sys
dim = 3
number = 5
args = sys.argv[1:]
if len(args) > 3:
sys.exit("invalid number of arguments")
elif len(args) > 0:
myshape = [ int(x) for x in args ]
else:
myshape = [ 3, 4, 8 ]
# generate a data matrix of the given shape
size = reduce(lambda x,y: x*y, myshape)
#data = [ random.randrange(size - i) + 10 for i in xrange(size) ]
data = [ float(i)/100.0 for i in xrange(size) ]
data = reshape(data, myshape)
# setup some test bar charts
if True:
chart1 = BarChart()
chart1.data = data
chart1.xlabel = 'Benchmark'
chart1.ylabel = 'Bandwidth (GBps)'
chart1.legend = [ 'x%d' % x for x in xrange(myshape[-1]) ]
chart1.xticks = [ 'xtick%d' % x for x in xrange(myshape[0]) ]
chart1.title = 'this is the title'
if len(myshape) > 2:
chart1.xsubticks = [ '%d' % x for x in xrange(myshape[1]) ]
chart1.graph()
chart1.savefig('/tmp/test1.png')
chart1.savefig('/tmp/test1.ps')
chart1.savefig('/tmp/test1.eps')
chart1.savecsv('/tmp/test1.csv')
if False:
chart2 = BarChart()
chart2.data = data
chart2.colormap = 'gray'
chart2.graph()
chart2.savefig('/tmp/test2.png')
chart2.savefig('/tmp/test2.ps')
# pylab.show()
| bsd-3-clause |
cauchycui/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; neither does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print
features = [0, 5, 1, 2, (5, 1)]
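# Target features by index in cal_housing.feature_names: 0=MedInc,
# 5=AveOccup, 1=HouseAge, 2=AveRooms, plus the (AveOccup, HouseAge) pair
# for the two-way partial dependence plot.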
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
WangWenjun559/Weiss | summary/sumy/sklearn/tree/tests/test_export.py | 76 | 9318 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
| apache-2.0 |
jbarrow/LambdaNet | docs/analysis.py | 4 | 1453 | import numpy as np
import matplotlib.pyplot as plt
def plot_activation_functions(neuron_type):
"""
Load the text file generated by the Haskell documentation
script and use it to generate a png of the activation function.
"""
x = np.arange(-10.,10.01,0.01)
y = np.loadtxt(neuron_type + ".txt")
title = neuron_type.title()
if len(neuron_type.split("_")) == 2:
split = neuron_type.split("_")
title = split[0].title() + " of " + split[1].title()
plt.figure()
plt.title(title + " Neuron Activation Graph")
plt.plot(x, y)
plt.savefig("images/" + neuron_type + ".png")
def plot_distribution_functions(distribution):
"""
Load the text file generated in Haskell and use it to
generate a histogram of the data with 50 bins
"""
data = np.loadtxt(distribution + ".txt")
plt.figure()
plt.hist(data, bins=50)
plt.savefig("images/" + distribution + ".png")
def main():
# Iterate through the activation functions and their derivatives
neuron_types = [
"sigmoid", "reclu", "tanh",
"derivative_sigmoid", "derivative_reclu", "derivative_tanh"
]
for activation in neuron_types:
plot_activation_functions(activation)
distribution_types = [
"normal", "uniform", "bounded_uniform"
]
for distribution in distribution_types:
plot_distribution_functions(distribution)
if __name__ == '__main__':
main()
| mit |
jsilter/scipy | doc/source/tutorial/examples/newton_krylov_preconditioning.py | 99 | 2489 | import numpy as np
from scipy.optimize import root
from scipy.sparse import spdiags, kron
from scipy.sparse.linalg import spilu, LinearOperator
from numpy import cosh, zeros_like, mgrid, zeros, eye
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
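# The residual assembled in solve() discretizes
#     d2P/dx2 + d2P/dy2 + 5 * cosh(P).mean()**2 = 0
# on an nx-by-ny grid with the fixed boundary values defined above.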
def get_preconditioner():
"""Compute the preconditioner M"""
diags_x = zeros((3, nx))
diags_x[0,:] = 1/hx/hx
diags_x[1,:] = -2/hx/hx
diags_x[2,:] = 1/hx/hx
Lx = spdiags(diags_x, [-1,0,1], nx, nx)
diags_y = zeros((3, ny))
diags_y[0,:] = 1/hy/hy
diags_y[1,:] = -2/hy/hy
diags_y[2,:] = 1/hy/hy
Ly = spdiags(diags_y, [-1,0,1], ny, ny)
J1 = kron(Lx, eye(ny)) + kron(eye(nx), Ly)
# Now we have the matrix `J_1`. We need to find its inverse `M` --
# however, since an approximate inverse is enough, we can use
# the *incomplete LU* decomposition
J1_ilu = spilu(J1)
# This returns an object with a method .solve() that evaluates
# the corresponding matrix-vector product. We need to wrap it into
# a LinearOperator before it can be passed to the Krylov methods:
M = LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve)
return M
def solve(preconditioning=True):
"""Compute the solution"""
count = [0]
def residual(P):
count[0] += 1
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2])/hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y + 5*cosh(P).mean()**2
# preconditioner
if preconditioning:
M = get_preconditioner()
else:
M = None
# solve
guess = zeros((nx, ny), float)
sol = root(residual, guess, method='krylov',
options={'disp': True,
'jac_options': {'inner_M': M}})
print 'Residual', abs(residual(sol.x)).max()
print 'Evaluations', count[0]
return sol.x
def main():
sol = solve(preconditioning=True)
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.clf()
plt.pcolor(x, y, sol)
plt.clim(0, 1)
plt.colorbar()
plt.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
idealabasu/code_pynamics | python/pynamics_examples/standing_stability_test.py | 1 | 13235 | #!/usr/bin/env python
# coding: utf-8
# ---
# title: Triple Pendulum Example
# type: submodule
# ---
# In[1]:
# Try running with this variable set to true and to false and see the difference in the resulting equations of motion
# In[2]:
global_q = False
# Import all the necessary modules
# In[3]:
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import pynamics
from pynamics.frame import Frame
from pynamics.variable_types import Differentiable,Constant
from pynamics.system import System
from pynamics.body import Body
from pynamics.dyadic import Dyadic
from pynamics.output import Output,PointsOutput
from pynamics.particle import Particle
import pynamics.integration
import numpy
import matplotlib.pyplot as plt
plt.ion()
from math import pi
import scipy.interpolate
import sympy
# The next two lines create a new system object and set that system as the global system within the module so that other variables can use and find it.
# In[4]:
system = System()
pynamics.set_system(__name__,system)
# ## Parameterization
#
# ### Constants
#
# Declare constants and seed them with their default value. This can be changed at integration time but is often a nice shortcut when you don't want the value to change but you want it to be represented symbolically in calculations
# In[5]:
lA = Constant(1,'lA',system)
lB = Constant(1,'lB',system)
lC = Constant(6*25.4/1000,'lC',system)
mA = Constant(1,'mA',system)
mB = Constant(1,'mB',system)
mC = Constant(1,'mC',system)
m1 = Constant(2,'m1',system)
g = Constant(9.81,'g',system)
b = Constant(1e1,'b',system)
k1 = Constant(1e2,'k1',system)
k2 = Constant(1e1,'k2',system)
preload1 = Constant(0*pi/180,'preload1',system)
preload2 = Constant(0*pi/180,'preload2',system)
preload3 = Constant(0*pi/180,'preload3',system)
Ixx_A = Constant(1,'Ixx_A',system)
Iyy_A = Constant(1,'Iyy_A',system)
Izz_A = Constant(1,'Izz_A',system)
Ixx_B = Constant(1,'Ixx_B',system)
Iyy_B = Constant(1,'Iyy_B',system)
Izz_B = Constant(1,'Izz_B',system)
Ixx_C = Constant(1,'Ixx_C',system)
Iyy_C = Constant(1,'Iyy_C',system)
Izz_C = Constant(1,'Izz_C',system)
k_constraint = Constant(1e4,'k_constraint',system)
b_constraint = Constant(1e2,'b_constraint',system)
force_var = sympy.Symbol('fv')
# ## Integration Tolerance
# Specify the precision of the integration
# In[6]:
tol = 1e-11
# ### Time
# Define variables for time that can be used throughout the script. These get used to create the t array, a list of every time value that is solved for during integration
# In[7]:
tinitial = 0
tfinal = 10
fps = 30
tstep = 1/fps
t = numpy.r_[tinitial:tfinal:tstep]
force = t*0
ii = (t==3).nonzero()[0][0]
jj = (t==5).nonzero()[0][0]
force[ii:jj] = 10
f_force = scipy.interpolate.interp1d(t, force,fill_value='extrapolate')
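# The interpolated force profile is zero except between t=3 and t=5, where it
# equals 10 (model units). It is passed to the integrator through force_var
# below and only acts on the system if the commented-out addforce call that
# uses force_var is enabled.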
# ### Differentiable State Variables
#
# Define your differentiable state variables that you will use to model the state of the system. In this case $qA$, $qB$, and $qC$ are the rotation angles of a three-link mechanism
# In[8]:
x,x_d,x_dd = Differentiable('x',system)
y,y_d,y_dd = Differentiable('y',system)
qA,qA_d,qA_dd = Differentiable('qA',system)
qB,qB_d,qB_dd = Differentiable('qB',system)
qC,qC_d,qC_dd = Differentiable('qC',system)
x2,x2_d,x2_dd = Differentiable('x2',system)
# ### Initial Values
# Define a set of initial values for the position and velocity of each of your state variables. It is necessary to start the integration from a known initial state. This code creates a dictionary of initial values.
# In[9]:
initialvalues = {}
initialvalues[x]=0
initialvalues[x_d]=0
initialvalues[y]=2.01
initialvalues[y_d]=0
initialvalues[qA]=0*pi/180
initialvalues[qA_d]=0*pi/180
initialvalues[qB]=0*pi/180
initialvalues[qB_d]=0*pi/180
initialvalues[qC]=0*pi/180
initialvalues[qC_d]=0*pi/180
initialvalues[x2]=-1.5
initialvalues[x2_d]=.5
# These two lines of code order the initial values in a list in such a way that the integrator can use it in the same order that it expects the variables to be supplied
# In[10]:
statevariables = system.get_state_variables()
ini = [initialvalues[item] for item in statevariables]
# ## Kinematics
#
# ### Frames
# Define the reference frames of the system
# In[11]:
N = Frame('N')
A = Frame('A')
B = Frame('B')
C = Frame('C')
# ### Newtonian Frame
#
# It is important to define the Newtonian reference frame as a reference frame that is not accelerating, otherwise the dynamic equations will not be correct
# In[12]:
system.set_newtonian(N)
# This is the first time that the "global_q" variable is used. If you choose to rotate each frame with reference to the base frame, there is the potential for a representational simplification. If you use a relative rotation, this can also be simpler in some cases. Try running the code either way to see which one is simpler in this case.
# In[13]:
A.rotate_fixed_axis_directed(N,[0,0,1],qA,system)
B.rotate_fixed_axis_directed(A,[0,0,1],qB,system)
C.rotate_fixed_axis_directed(B,[0,0,1],qC,system)
# ### Vectors
# Define the vectors that describe the kinematics of a series of connected lengths
#
# * pNA - This vector locates the top of the leg at (x, y) in the Newtonian frame
# * pAB - This vector is length $l_A$ away from pNA along the -A.y unit vector
# * pBC - This vector is length $l_B$ away from pAB along the -B.y unit vector
# * pC1, pC2 - These vectors are each length $l_C/2$ away from pBC along the -C.x and +C.x unit vectors, locating the two ends of link C
# * pm1 - This vector locates the moving mass at (x2, 2) in the Newtonian frame
# In[14]:
pNA=x*N.x+y*N.y
pAB=pNA-lA*A.y
pBC=pAB-lB*B.y
pC1 = pBC - lC/2*C.x
pC2 = pBC + lC/2*C.x
pm1 = x2*N.x+2*N.y
vNA = pNA.time_derivative()
vC1 = pC1.time_derivative()
vC2 = pC2.time_derivative()
vm1 = pm1.time_derivative()
# ## Centers of Mass
#
# It is important to define the centers of mass of each link. In this case, the centers of mass of links A and B are halfway along their lengths, while the center of mass of link C is placed at the joint point pBC
# In[15]:
pAcm=pNA-lA/2*A.y
pBcm=pAB-lB/2*B.y
pCcm=pBC
# ## Calculating Velocity
#
# The angular velocity between frames, and the time derivatives of vectors are extremely useful in calculating the equations of motion and for determining many of the forces that need to be applied to your system (damping, drag, etc). Thus, it is useful, once kinematics have been defined, to take or find the derivatives of some of those vectors for calculating linear or angular velocity vectors
#
# ### Angular Velocity
# The following three lines of code computes and returns the angular velocity between frames N and A (${}^N\omega^A$), A and B (${}^A\omega^B$), and B and C (${}^B\omega^C$). In other cases, if the derivative expression is complex or long, you can supply pynamics with a given angular velocity between frames to speed up computation time.
# In[16]:
wNA = N.getw_(A)
wAB = A.getw_(B)
wBC = B.getw_(C)
# ### Vector derivatives
# The time derivatives of vectors may also be taken explicitly when needed, for example:
# vCtip = pCtip.time_derivative(N,system)
# ### Define Inertias and Bodies
# The next several lines compute the inertia dyadics of each body and define a rigid body on each frame. In the case of frame C, we represent the mass as a particle located at point pCcm.
# In[17]:
IA = Dyadic.build(A,Ixx_A,Iyy_A,Izz_A)
IB = Dyadic.build(B,Ixx_B,Iyy_B,Izz_B)
IC = Dyadic.build(B,Ixx_C,Iyy_C,Izz_C)
BodyA = Body('BodyA',A,pAcm,mA,IA,system)
BodyB = Body('BodyB',B,pBcm,mB,IB,system)
BodyC = Body('BodyC',C,pCcm,mC,IC,system)
ParticleM = Particle(pm1,m1,'ParticleM',system)
# ## Forces and Torques
# Forces and torques are added to the system with the generic ```addforce``` method. The first parameter supplied is a vector describing the force applied at a point or the torque applied along a given rotational axis. The second parameter is the vector describing the linear speed (for an applied force) or the angular velocity(for an applied torque)
# In[18]:
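# The two blocks below appear to model ground contact at each end of link C
# as a one-sided penalty spring-damper: stretch_s is twice the penetration
# depth when the point is below the ground (negative y) and zero otherwise,
# and "on" is approximately 1 only while in contact, so the damping force
# acts only during penetration.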
stretch1 = -pC1.dot(N.y)
stretch1_s = (stretch1+abs(stretch1))
on = stretch1_s/(2*stretch1+1e-10)
system.add_spring_force1(k_constraint,-stretch1_s*N.y,vC1)
system.addforce(-b_constraint*vC1*on,vC1)
toeforce = k_constraint*-stretch1_s
stretch2 = -pC2.dot(N.y)
stretch2_s = (stretch2+abs(stretch2))
on = stretch2_s/(2*stretch2+1e-10)
system.add_spring_force1(k_constraint,-stretch2_s*N.y,vC2)
system.addforce(-b_constraint*vC2*on,vC2)
system.addforce(-b*wNA,wNA)
system.addforce(-b*wAB,wAB)
system.addforce(-b*wBC,wBC)
# system.addforce(force_var*N.x,vNA)
stretch3_v = (pm1 - pNA)
stretch3_uv = 1/(stretch3_v.length() + 1e-10)* stretch3_v
stretch3 = 1-(pm1 - pNA).length()
stretch3_s = (stretch3+abs(stretch3))
on = stretch3_s/(2*stretch3+1e-10)
system.add_spring_force2(k_constraint,-stretch3_uv*stretch3_s,vm1,-vNA)
# system.addforce(-b_constraint*vC2*on,vC2)
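# The block above appears to model a compression-only interaction of rest
# length 1 between the moving mass pm1 and the top of the leg pNA, so the
# mass pushes on the standing mechanism once it comes within a unit distance.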
# ### Spring Forces
#
# Spring forces are a special case because the energy stored in springs is conservative and should be considered when calculating the system's potential energy. To do this, use the ```add_spring_force``` command. In this method, the first value is the linear spring constant. The second value is the "stretch" vector, indicating the amount of deflection from the neutral point of the spring. The final parameter is, as above, the linear or angluar velocity vector (depending on whether your spring is a linear or torsional spring)
#
# In this case, the torques applied to each joint are dependent upon whether qA, qB, and qC are absolute or relative rotations, as defined above.
# In[19]:
# system.add_spring_force1(k1,(qA-preload1)*N.z,wNA)
system.add_spring_force1(k2,(qB-preload2)*A.z,wAB)
system.add_spring_force1(k1,(qC-preload3)*B.z,wBC)
# ### Gravity
# Again, like springs, the force of gravity is conservative and should be applied to all bodies. To globally apply the force of gravity to all particles and bodies, you can use the special ```addforcegravity``` method, by supplying the acceleration due to gravity as a vector. This will get applied to all bodies defined in your system.
# In[20]:
system.addforcegravity(-g*N.y)
# ## Constraints
# Constraints may be defined that prevent the motion of certain elements. Try uncommenting the commented out line to see what happens.
# In[21]:
eq = []
# eq.append(pCtip.dot(N.y))
eq_d=[(system.derivative(item)) for item in eq]
eq_dd=[(system.derivative(item)) for item in eq_d]
# ## F=ma
# This is where the symbolic expressions for F and ma are calculated. This must be done after all parts of the system have been defined. The ```getdynamics``` function uses Kane's method to derive the equations of motion.
# In[22]:
f,ma = system.getdynamics()
# In[23]:
f
# In[24]:
ma
# ## Solve for Acceleration
#
# The next line of code solves the system of equations F=ma plus any constraint equations that have been added above. It returns one or two variables. func1 is the function that computes the velocity and acceleration given a certain state, and lambda1(optional) supplies the function that computes the constraint forces as a function of the resulting states
#
# There are a few ways of solveing for a. The below function inverts the mass matrix numerically every time step. This can be slower because the matrix solution has to be solved for, but is sometimes more tractable than solving the highly nonlinear symbolic expressions that can be generated from the previous step. The other options would be to use ```state_space_pre_invert```, which pre-inverts the equations symbolically before generating a numerical function, or ```state_space_post_invert2```, which adds Baumgarte's method for intermittent constraints.
# In[25]:
func1,lambda1 = system.state_space_post_invert(f,ma,eq_dd,return_lambda = True,variable_functions={force_var:f_force})
# ## Integrate
#
# The next line of code integrates the function calculated
# In[26]:
states=pynamics.integration.integrate_odeint(func1,ini,t,rtol=tol,atol=tol,hmin=tol, args=({'constants':system.constant_values},))
# ## Outputs
#
#
# The next section simply calculates and plots a variety of data from the previous simulation
# ### States
# In[27]:
plt.figure()
artists = plt.plot(t,states[:,:5])
plt.legend(artists,['x','y','qA','qB','qC'])
# ### Energy
# In[28]:
# KE = system.get_KE()
# PE = system.getPEGravity(pNA) - system.getPESprings()
# energy_output = Output([KE-PE],system)
# energy_output.calc(states)
# energy_output.plot_time()
# ### Motion
# In[29]:
points = [pm1,pNA,pAB,pBC,pC1,pC2]
points_output = PointsOutput(points,system)
y = points_output.calc(states)
points_output.plot_time(5)
# #### Motion Animation
# in normal Python the next lines of code produce an animation using matplotlib
# In[30]:
points_output.animate(fps = fps,movie_name = 'render.mp4',lw=2,marker='o',color=(1,0,0,1),linestyle='-')
#a()
# To plot the animation in jupyter you need a couple extra lines of code...
# In[31]:
# from matplotlib import animation, rc
# from IPython.display import HTML
# HTML(points_output.anim.to_html5_video())
# ### Constraint Forces
# This line of code computes the constraint forces once the system's states have been solved for.
# In[32]:
# lambda2 = numpy.array([lambda1(item1,item2,system.constant_values) for item1,item2 in zip(t,states)])
# plt.figure()
# plt.plot(t, lambda2)
# In[ ]:
| mit |
LaboratoireMecaniqueLille/crappy | util/open_hdf.py | 1 | 1723 | # coding: utf-8
import tables
import sys
# import numpy as np
import matplotlib.pyplot as plt
NODE = "table" # Node containing the array to read
# If the range node is not specified, the digital levels (ints) will
# be returned
# If specified, it will use it to turn these levels in mV (SLOWER!)
# RANGE_NODE = None # The name of the node storing the ranges of each channel
RANGE_NODE = "factor"
# Change here to ignore the prompt
filename = None
start = 0 # Start to read from
stop = None # Where to stop (leave none to read the whole file)
step = None # Step (if None, will be computed and suggested)
if sys.version_info.major > 2:
raw_input = input
filename = filename if len(sys.argv) == 1 else sys.argv[-1]
if not filename:
filename = raw_input("File name?")
h = tables.open_file(filename)
print(h)
arr = getattr(h.root, NODE)
lines, rows = arr.shape
print(lines, "lines and", rows, "rows")
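# Suggest a decimation step so that roughly 500000 values are loaded into
# memory when reading the whole array (total values / 500000, at least 1).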
a = rows * lines // 500000 or 1
if a > 1:
print("How many lines do you want to skip on each read?")
print("1 will read ALL the data (may use too much memory!)")
print("Suggested value:", a)
if not step:
step = int(raw_input("Read one out of how many lines?({})".format(a)) or a)
else:
step = a
i = 0
if not hasattr(h.root, RANGE_NODE):
RANGE_NODE = None
print("Reading...")
if RANGE_NODE:
out = arr.read(start=start, stop=stop, step=step).astype(float)
else:
out = arr.read(start=start, stop=stop, step=step)
print(out.shape)
if RANGE_NODE:
ranges = getattr(h.root, RANGE_NODE).read()
h.close()
print("Applying factor...")
for i, r in enumerate(ranges):
out[:, i] *= r
else:
h.close()
print("Plotting...")
for i in range(rows):
plt.plot(out[:, i])
print("Done!")
plt.show()
| gpl-2.0 |
toastedcornflakes/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 19 | 12101 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
X_sparse, y_dense = load_svmlight_file(datafile)
X_dense = X_sparse.toarray()
y_sparse = sp.csr_matrix(y_dense)
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
for X in (X_sparse, X_dense, X_sliced):
for y in (y_sparse, y_dense, y_sliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
if (sp.issparse(y) and y.shape[0] == 1):
# make sure y's shape is: (n_samples, n_labels)
# when it is sparse
y = y.T
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
X2_dense = X2.toarray()
if dtype == np.float32:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 4)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 4)
else:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 15)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 15)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
y_sparse = sp.csr_matrix(y_dense)
for y in [y_dense, y_sparse]:
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
OshynSong/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
RohitSaha/GyPsY | EmotionAPIIntegration.py | 1 | 3928 | from __future__ import print_function
import time
import requests
import cv2
import operator
import numpy as np
import matplotlib.pyplot as plt
_url = 'https://westus.api.cognitive.microsoft.com/emotion/v1.0/recognize'
_key = '9f8453cfca9946db9bbe0dd80d43ad10'
_maxNumRetries = 10
def processRequest(json, data, headers, params):
"""
Helper function to process the request to Project Oxford
Parameters:
json: Used when processing images from its URL. See API Documentation
data: Used when processing image read from disk. See API Documentation
headers: Used to pass the key information and the data type request
"""
retries = 0
result = None
while True:
response = requests.request('post', _url, json=json, data=data, headers=headers, params=params)
if response.status_code == 429:
print("Message: %s" % (response.json()['error']['message']))
if retries <= _maxNumRetries:
time.sleep(1)
retries += 1
continue
else:
print('Error: failed after retrying!')
break
elif response.status_code == 200 or response.status_code == 201:
if 'content-length' in response.headers and int(response.headers['content-length']) == 0:
result = None
elif 'content-type' in response.headers and isinstance(response.headers['content-type'], str):
if 'application/json' in response.headers['content-type'].lower():
result = response.json() if response.content else None
elif 'image' in response.headers['content-type'].lower():
result = response.content
else:
print("Error code: %d" % (response.status_code))
print("Message: %s" % (response.json()['error']['message']))
break
return result
def renderResultOnImage(result, img):
"""Display the obtained results onto the input image"""
for currFace in result:
faceRectangle = currFace['faceRectangle']
cv2.rectangle(img, (faceRectangle['left'], faceRectangle['top']),
(faceRectangle['left'] + faceRectangle['width'], faceRectangle['top'] + faceRectangle['height']),
color=(255, 0, 0), thickness=5)
for currFace in result:
faceRectangle = currFace['faceRectangle']
currEmotion = max(currFace['scores'].items(), key=operator.itemgetter(1))[0]
textToWrite = "%s" % (currEmotion)
global emotion
emotion = textToWrite
cv2.putText(img, textToWrite, (faceRectangle['left'], faceRectangle['top'] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(255, 0, 0), 1)
def get_emotion():
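    # Capture frames from the default webcam until Enter (key code 13) is
    # pressed, save the last frame to Unique.jpg, send it to the Emotion API,
    # render and plot the result, and return the recognized emotion label.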
headers = dict()
headers['Ocp-Apim-Subscription-Key'] = _key
headers['Content-Type'] = 'application/octet-stream'
json = None
params = None
cap = cv2.VideoCapture(0)
while True:
_, frame = cap.read()
cv2.imshow('frame', frame)
key = cv2.waitKey(1)
if key == 13:
break
cv2.imwrite('Unique.jpg', frame)
pathToFileInDisk = r'Unique.jpg'
with open( pathToFileInDisk, 'rb') as f:
data = f.read()
#data = frame
result = processRequest( json, data, headers, params )
if result is not None:
# Load the original image from disk
data8uint = np.fromstring(data, np.uint8) # Convert string to an unsigned int array
img = cv2.cvtColor(cv2.imdecode(data8uint, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
renderResultOnImage(result, img)
ig, ax = plt.subplots(figsize=(15, 20))
ax.imshow(img)
#print(data8uint)
plt.show()
return emotion
#return data8uint
| mit |
IssamLaradji/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
hehongliang/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 137 | 2219 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
ehua7365/RibbonOperators | TEBD/tebdIsing4.py | 1 | 11536 | """
tebdIsing4.py
Use TEBD to compute ground state of transverse field Ising model.
2014-09-17
"""
import numpy as np
from cmath import *
from mpstest17 import *
import matplotlib.pyplot as plt
import time
def main():
test0()
def test0():
(J,hx) = (0,1)
(nsteps,chi) = (100,5)
for N in [3]:
test2(J,hx,N,1.0/nsteps,nsteps,chi)
def test1():
print(getU(1,1,1))
a = np.random.rand(3,3)
show(a,"a")
show(np.linalg.eig(a)[0],"eignenvalues of a")
ea = expm(a)
show(ea,"e^a")
show(np.linalg.eig(ea)[0],"eigenvalues of e^a")
show(np.log(np.linalg.eig(ea)[0]),"log of eigenvalues of e^a")
print(s(0),"I")
print(s(1),"X")
print(s(2),"Y")
print(s(3),"Z")
def test2(J,hx,N,t,nsteps,chi):
print("\nStarting TEBD for N = %d Ising model with parameters:"%N)
print(" (J,hx,N) = (%.4f,%.4f,%d)"%(J,hx,N))
print(" (t,nsteps,chi) = (%f,%d,%d)"%(t,nsteps,chi))
startTime = time.clock()
mps = tebdIsing(J,hx,N,t,nsteps,chi)
runTime = time.clock()-startTime
print("Simulation completed in %f seconds"%runTime)
gs = getStateOBC(mps)
gs = gs/np.linalg.norm(gs) # Normalize state
## show(gs,"Ground State")
print("Ground state =")
displayState(gs)
startTime = time.clock()
energyBF = getEnergyBruteForce(J,hx,N,gs).real
print("Energy = %f, Energy per spin = %f"%(energyBF,energyBF/N))
runTime = time.clock() - startTime
print("Brute force energy computed in %f seconds"%runTime)
startTime = time.clock()
energy = getEnergy(J,hx,N,mps).real
print("Energy = %f, Energy per spin = %f"%(energy,energy/N))
runTime = time.clock() - startTime
print("MPO energy computed in %f seconds"%runTime)
def test3():
print(pairHamiltonianMPO())
def isingH(J,hx,N):
"""
Full matrix representation of ising model hamiltonian.
"""
X = 3
Z = 1
pairs = np.zeros((2**N,2**N),dtype=complex)
for i in xrange(N-1):
pairs += pauli([X,X],[i,i+1],N)
fields = np.zeros((2**N,2**N),dtype=complex)
for i in xrange(N):
fields += pauli([Z],[i],N)
return -J*pairs-hx*fields
def tebdIsing(J,hx,N,t,nsteps,chi):
"""
Run TEBD algorithm on 1D N-spin transverse field Ising model.
Uses open boundary conditions and imaginary time evolution.
Parameters
----------
J : float
Pair-wise interaction energy.
hx : float
Magnetic energy in transverse B-field.
N : int
Number of spins.
t : float
Timestep of each iteration.
nsteps : int
Number of time evolution iterations simulated.
Returns
-------
groundState : list
MPS representation of ground state.
energies : (nsteps) ndarray
Energies at each timestep.
"""
# Initiate system with random MPS state.
d = 2
state = randomMPSOBC(N,chi,d,real=True)
## state = allEqual(N)
print("Intial state")
displayState(getStateOBC(state))
# Initialise list of energies at each iteration
# Compute time evolution operators.
U = getU(J,hx,t) # Acts on pairs of spins in middle
Ub = getUb(J,hx,t) # Acts on only one boundary spin
energies = []
# Run iteration nstep times
for step in xrange(nsteps):
# First half evolution
# Evolve first two spins
state[0],state[1] = leftPair(state[0],state[1],U,chi,d)
# Evolve middle spins
for i in xrange(2,N-2,2):
state[i],state[i+1] = middlePair(state[i],state[i+1],U,chi,d)
# Evolve last spin pair (or single spin if odd)
## show(state[-1].shape,"state[-1]")
if N % 2 and N > 2: # N is odd
state[-1] = rightSingle(state[-1],Ub)
## print("odd")
elif N > 2: # N is even
state[-2],state[-1] = rightPair(state[-2],state[-1],U,chi,d)
## show(state[-1].shape,"state[-1]")
# Second half evolution
# Evolve first spin
state[0] = leftSingle(state[0],Ub)
# Evolve middle spins
for i in xrange(1,N-2,2):
state[i],state[i+1] = middlePair(state[i],state[i+1],U,chi,d)
## show(state[-1].shape,"state[-1]")
state[-1] = rightSingle(state[-1],Ub)
## show(state[-1].shape,"state[-1]")
## # Evolve last spin (or spin pair if odd)
## show(state[-1].shape,"state[-1]")
if N % 2 and N > 2: # N is odd
state[-2],state[-1] = rightPair(state[-2],state[-1],U,chi,d)
elif N > 2: # N is even and greater than 2
state[-1] = rightSingle(state[-1],Ub)
## energies.append(getEnergy(state))
## if innerProductOBC(state,state) > 1e100:
## state = [s/1e10 for s in state]
## show(sum([np.sum(np.abs(s)) for s in state]),"sum state")
## show(innerProductOBC(state,state),"<a|a>")
energies.append(getEnergy(J,hx,N,state))
plt.plot(energies)
plt.show()
return state
def middlePair(A,B,U,chi,d):
"""
Evolve a pair of spins in middle.
"""
lbd = A.shape[0] # Left bond dimension
rbd = B.shape[2] # Right bond dimension
theta = np.tensordot(A,U,axes=(1,2))
theta = np.tensordot(theta,B,axes=((1,4),(0,1)))
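# theta now carries the indices (left bond, physical A, physical B, right bond),
# i.e. shape (lbd, d, d, rbd); the reshape below groups them as (lbd*d) rows by
# (d*rbd) columns so the truncated SVD can split the pair back into two site tensors.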
theta = np.reshape(theta,(lbd*d,rbd*d))
(a,b) = efficientSVD(theta,chi)
a = np.reshape(a,(lbd,d,a.shape[1]))
b = np.reshape(b,(b.shape[0],d,rbd))
return (a,b)
def leftPair(A,B,U,chi,d):
"""
Evolve a pair of spins on left.
"""
rbd = B.shape[2] # Right bond dimension
theta = np.tensordot(A,U,axes=(0,2))
theta = np.tensordot(theta,B,axes=((0,3),(0,1)))
theta = np.reshape(theta,(d,d*rbd))
(a,b) = efficientSVD(theta,chi)
b = np.reshape(b,(b.shape[0],d,rbd))
return (a,b)
def rightPair(A,B,U,chi,d):
"""
Evolve a pair of spins on right.
"""
lbd = A.shape[0] # Left bond dimension
## show(A.shape,"A")
## show(B.shape,"B")
## show(U.shape,"U")
theta = np.tensordot(A,U,axes=(1,2))
## show(theta.shape,"A*U")
theta = np.tensordot(theta,B,axes=((1,4),(0,1)))
## show(theta.shape,"A*U*B")
theta = np.reshape(theta,(lbd*d,d))
(a,b) = efficientSVD(theta,chi)
a = np.reshape(a,(lbd,d,a.shape[1]))
return (a,b)
def leftSingle(A,Ub):
"""
Evolve a single spin on left end.
"""
## show(A.shape,"leftSingleA")
## show(Ub.shape,"leftSingleUb")
return np.tensordot(Ub,A,axes=(1,0))
def rightSingle(A,Ub):
"""
Evolve a single spin on right end.
"""
return np.tensordot(A,Ub,axes=(1,1))
def pairHamiltonianMPO():
X = s(3)
XX = np.kron(X,X)
(a,b) = efficientSVD(XX,10)
(a,b) = (np.reshape(a,(2,2,4)),np.reshape(b,(4,2,2)))
## print(np.reshape(np.tensordot(a,b,(-1,0)),(4,4)))
return (a,b)
def getEnergy(J,hx,N,mps):
"""
Energy <a|H|a> of a state |a> by transfer matrices.
Parameters
----------
J : float
Coupling constant.
hx : float
Product of magnetic moment and field.
N : int
Number of spins.
mps : list
MPS representation of state.
"""
# Local energy
I = np.reshape(s(0),(1,2,2,1))
X = np.reshape(s(3),(1,2,2,1))
Z = np.reshape(s(1),(1,2,2,1))
localEnergy = 0
for i in xrange(0,N-1):
hamiltonian = [I for x in xrange(N)]
## show((hamiltonian),"Hamiltonian MPO")
## show(N,"N")
## show(i,"i")
## show(hamiltonian[i],"hamiltonian[i]")
## show(hamiltonian[i+1],"hamiltonian[i+1]")
hamiltonian[i] = X
hamiltonian[i+1] = X
hamiltonian[0] = np.reshape(hamiltonian[0],(2,2,1))
hamiltonian[-1] = np.reshape(hamiltonian[-1],(1,2,2))
localEnergy += operatorInnerOBC(mps,hamiltonian,mps)
# Field energy
fieldEnergy = 0
for i in xrange(N):
hamiltonian = [I for x in xrange(N)]
hamiltonian[i] = Z
hamiltonian[0] = np.reshape(hamiltonian[0],(2,2,1))
hamiltonian[-1] = np.reshape(hamiltonian[-1],(1,2,2))
fieldEnergy += operatorInnerOBC(mps,hamiltonian,mps)
return (-J*localEnergy-hx*fieldEnergy)/\
innerProductOBC(mps,mps)
def getEnergyBruteForce(J,hx,N,state):
"""
Energy of state by brute force with 2**N by 2**N Hamiltonian matrix.
E = <a|H|a>.
Parameters
----------
state : (2**N,) ndarray
State vector of system.
Returns
-------
energy : complex
Energy of the system.
"""
H = isingH(J,hx,N)
(energies,states) = np.linalg.eig(H)
minEnergy = min(energies)
i = list(energies).index(minEnergy)
print("Sorted Eigenvalues of Hamiltonian:")
print(np.round(np.sort(energies.real),decimals=4))
gs = states[:,i]
gs = np.reshape(gs,gs.size)
print("Brute force ground state")
displayState(gs)
print("Brute force hamiltonian eigendecomposition energy")
print(energies[i].real)
return np.dot(np.conj(state),np.dot(H,state))
def getUb(J,hx,t):
"""
Time evolution operator acting on a single boundary spin.
Parameters
----------
J : float
Pair-wise interaction energy.
hx : float
Magnetic energy of each spin with dipole moment mu in field B.
t : float
Timestep of each iteration.
Returns
-------
Ub : (2, 2) ndarray
Non-unitary evolution operator acting on a single boundary spin.
"""
Z = s(1)
return expm(-hx*Z*t*0.5)
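# Added note: getUb exponentiates only half of the single-spin field term
# (the 0.5 factor above). The pair operator built in getU below likewise
# carries hx*0.5 per spin, so an interior spin collects its full field from
# its two neighbouring pairs; this boundary operator presumably supplies the
# missing half for the end spins, which belong to a single pair only.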
def getU(J,hx,t):
"""
Time evolution operator acting on 2 spins.
Parameters
----------
J : float
Pair-wise interaction energy.
hx : float
Magnetic energy of each spin with dipole moment mu in field B.
t : float
Timestep of each iteration.
Returns
-------
U : (2,2,2,2) ndarray
Non-unitary time evolution operator.
"""
X = s(3)
Z = s(1)
hamiltonian = -J*np.kron(X,X)\
-(np.kron(Z,s(0))+np.kron(s(0),Z))*hx*0.5
U = expm(-hamiltonian*t)
return np.reshape(U,(2,2,2,2))
def s(i):
"""
The Pauli Matrices I,X,Y,Z.
s(0) = I, s(1) = X, s(2) = Y, s(3) = Z.
Parameters
----------
i : index of Pauli Matrix.
Returns
-------
s : (2,2) ndarray
Pauli matrix with complex elements.
"""
if i == 0:
return np.eye(2,dtype=complex)
elif i == 1:
return np.array([[0,1],[1,0]],dtype=complex)
elif i == 2:
return np.array([[0,-1j],[1j,0]],dtype=complex)
elif i == 3:
return np.array([[1,0],[0,-1]],dtype=complex)
def pauli(paulis,positions,N):
mat = 1+0j
identity = s(0)
for i in xrange(N):
if i in positions:
mat = np.kron(mat,s(paulis[positions.index(i)]))
else:
mat = np.kron(mat,identity)
return mat
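# Example (added for illustration): with the s() convention above, where
# s(1) = X and s(3) = Z,
#   pauli([1, 1], [0, 1], 3)
# builds the 8x8 matrix X (x) X (x) I acting on a 3-spin chain.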
def expm(A):
"""
Matrix exponential by eigen-decomposition.
Parameters
----------
A : (N, N) array_like
Matrix to be exponentiated
Returns
-------
expm : (N, N) ndarray
Matrix exponential of A
"""
s,vr = np.linalg.eig(A)
vri = np.linalg.inv(vr)
return np.dot(np.dot(vr,np.diag(np.exp(s))),vri)
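# Illustrative sanity check (not in the original file): for a diagonalizable
# matrix this eigendecomposition-based exponential should agree with SciPy's
# implementation, e.g.
#   import scipy.linalg
#   A = np.random.rand(4, 4) + 1j * np.random.rand(4, 4)
#   np.allclose(expm(A), scipy.linalg.expm(A))   # expected to be True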
def displayState(state):
display = ""
N = int(np.log2(state.size))
for i in xrange(state.size):
display += " + %.4f*exp(%d"%(abs(state[i]),np.degrees(phase(state[i])))
display += u'\u00b0' + "i)|" + format(i,"0"+str(N)+"b") + ">"
if i % 2:
display += "\n"
print(display[:-1])
if __name__ == "__main__":
main()
| mit |
robin-lai/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
alvarofierroclavero/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
n7jti/kaggle | DigitRecognizer/learndigits.py | 1 | 1291 | #!/usr/bin/python
from scipy import *
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn import metrics
from sklearn import svm
import time
import pickle
def load ():
# Load a csv of floats:
#train = np.genfromtxt("data/train.csv", delimiter=",", skip_header=1)
#y_train = train[:,0].astype(int)
#x_train = train[:,1:]
npzfile = np.load('data/bindata.npz')
x = npzfile['x']
y = npzfile['y'].astype(int)
#test = np.genfromtxt("data/test.csv", delimiter=",", skip_header=1)
#x_test = test
return y, x
def main ():
print 'starting', time.asctime(time.localtime())
y, x, = load();
# split into a training and testing set
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.5)
x_train = x
y_train = y
# Set the parameters by cross-validation
C=10
gamma=5e-7
clf = svm.SVC(C=C, gamma=gamma)
# Fit the classifier on the full training set (the train/test split above is commented out)
clf.fit(x_train, y_train)
# Pickle the model!
outf = open('training.pkl', 'wb')
pickle.dump(clf, outf)
outf.close()
print 'done!', time.asctime(time.localtime())
if __name__ == "__main__":
main()
| apache-2.0 |
james4424/nest-simulator | pynest/examples/sinusoidal_gamma_generator.py | 3 | 11497 | # -*- coding: utf-8 -*-
#
# sinusoidal_gamma_generator.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
'''
Sinusoidal gamma generator example
----------------------------------
This script demonstrates the use of the `sinusoidal_gamma_generator`
and its different parameters and modes. The source code of the model
can be found in models/sinusoidal_gamma_generator.h.
The script is structured into two parts, each of which generates its
own figure. In part 1A, two generators are created with different
orders of the underlying gamma process and their resulting PST
(Peristimulus time) and ISI (Inter-spike interval) histograms are
plotted. Part 1B illustrates the effect of the
``individual_spike_trains`` switch. In Part 2, the effects of
different settings for rate, phase and frequency are demonstrated.
'''
'''
First, we import all necessary modules for simulation, analysis and
plotting.
'''
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel() # in case we run the script multiple times from iPython
'''
We first create a figure for the plot and set the resolution of NEST.
'''
plt.figure()
nest.SetKernelStatus({'resolution': 0.01})
'''
Then we create two instances of the `sinusoidal_gamma_generator`
with two different orders of the underlying gamma process using
`Create`. Moreover, we create devices to record firing rates
(`multimeter`) and spikes (`spike_detector`) and connect them to the
generators using `Connect`.
'''
g = nest.Create('sinusoidal_gamma_generator', n=2,
params=[{'rate': 10000.0, 'amplitude': 5000.0,
'frequency': 10.0, 'phase': 0.0, 'order': 2.0},
{'rate': 10000.0, 'amplitude': 5000.0,
'frequency': 10.0, 'phase': 0.0, 'order': 10.0}])
m = nest.Create('multimeter', n=2, params={'interval': 0.1, 'withgid': False,
'record_from': ['rate']})
s = nest.Create('spike_detector', n=2, params={'withgid': False})
nest.Connect(m, g, 'one_to_one')
nest.Connect(g, s, 'one_to_one')
nest.Simulate(200)
'''
After simulating, the spikes are extracted from the
`spike_detector` using `GetStatus` and plots are created with panels
for the PST and ISI histograms.
'''
colors = ['b', 'g']
for j in range(2):
ev = nest.GetStatus([m[j]])[0]['events']
t = ev['times']
r = ev['rate']
sp = nest.GetStatus([s[j]])[0]['events']['times']
plt.subplot(221)
h, e = np.histogram(sp, bins=np.arange(0., 201., 5.))
plt.plot(t, r, color=colors[j])
plt.step(e[:-1], h * 1000 / 5., color=colors[j], where='post')
plt.title('PST histogram and firing rates')
plt.ylabel('Spikes per second')
plt.subplot(223)
plt.hist(np.diff(sp), bins=np.arange(0., 0.505, 0.01),
histtype='step', color=colors[j])
plt.title('ISI histogram')
'''
The kernel is reset and the number of threads set to 4.
'''
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
'''
First, a `sinusoidal_gamma_generator` with
`individual_spike_trains` set to ``True`` is created and connected to
20 parrot neurons whose spikes are recorded by a spike detector. After
simulating, a raster plot of the spikes is created.
'''
g = nest.Create('sinusoidal_gamma_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0, 'order': 3.,
'individual_spike_trains': True})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_detector')
nest.Connect(g, p)
nest.Connect(p, s)
nest.Simulate(200)
ev = nest.GetStatus(s)[0]['events']
plt.subplot(222)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('Individual spike trains for each target')
'''
The kernel is reset again and the whole procedure is repeated for
a `sinusoidal_gamma_generator` with `individual_spike_trains` set to ``False``.
The plot shows that in this case, all neurons receive the same spike train from
the `sinusoidal_gamma_generator`.
'''
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
g = nest.Create('sinusoidal_gamma_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0, 'order': 3.,
'individual_spike_trains': False})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_detector')
nest.Connect(g, p)
nest.Connect(p, s)
nest.Simulate(200)
ev = nest.GetStatus(s)[0]['events']
plt.subplot(224)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('One spike train for all targets')
'''
In part 2, multiple generators are created with different settings
for rate, phase and frequency. First, we define an auxiliary function
which simulates ``n`` generators for ``t`` ms. After ``t/2``, the
parameter dictionary of the generators is changed from initial to
after.
'''
def step(t, n, initial, after, seed=1, dt=0.05):
"""Simulates for n generators for t ms. Step at t/2."""
nest.ResetKernel()
nest.SetStatus([0], [{"resolution": dt}])
nest.SetStatus([0], [{"grng_seed": 256 * seed + 1}])
nest.SetStatus([0], [{"rng_seeds": [256 * seed + 2]}])
g = nest.Create('sinusoidal_gamma_generator', n, params=initial)
sd = nest.Create('spike_detector')
nest.Connect(g, sd)
nest.Simulate(t / 2)
nest.SetStatus(g, after)
nest.Simulate(t / 2)
return nest.GetStatus(sd, 'events')[0]
'''
This function serves to plot a histogram of the emitted spikes.
'''
def plot_hist(spikes):
plt.hist(spikes['times'],
bins=np.arange(0., max(spikes['times']) + 1.5, 1.),
histtype='step')
t = 1000
n = 1000
dt = 1.0
steps = t / dt
offset = t / 1000. * 2 * np.pi
'''
We create a figure with a 2x3 grid.
'''
grid = (2, 3)
fig = plt.figure(figsize=(15, 10))
'''
Simulate a `sinusoidal_gamma_generator` with default parameter
values, i.e. ac=0 and the DC value being changed from 20 to 50 after
``t/2`` and plot the number of spikes per second over time.
'''
plt.subplot(grid[0], grid[1], 1)
spikes = step(t, n,
{'rate': 20.0},
{'rate': 50.0, },
seed=123, dt=dt)
plot_hist(spikes)
exp = np.ones(steps)
exp[:steps / 2] *= 20
exp[steps / 2:] *= 50
plt.plot(exp, 'r')
plt.title('DC rate: 20 -> 50')
plt.ylabel('Spikes per second')
'''
Simulate a `sinusoidal_gamma_generator` with the DC value being
changed from 80 to 40 after ``t/2`` and plot the number of spikes per
second over time.
'''
plt.subplot(grid[0], grid[1], 2)
spikes = step(t, n,
{'order': 6.0, 'rate': 80.0, 'amplitude': 0.,
'frequency': 0., 'phase': 0.},
{'order': 6.0, 'rate': 40.0, 'amplitude': 0.,
'frequency': 0., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.ones(steps)
exp[:steps / 2] *= 80
exp[steps / 2:] *= 40
plt.plot(exp, 'r')
plt.title('DC rate: 80 -> 40')
'''
Simulate a `sinusoidal_gamma_generator` with the AC value being
changed from 40 to 20 after ``t/2`` and plot the number of spikes per
second over time.
'''
plt.subplot(grid[0], grid[1], 3)
spikes = step(t, n,
{'order': 3.0, 'rate': 40.0, 'amplitude': 40.,
'frequency': 10., 'phase': 0.},
{'order': 3.0, 'rate': 40.0, 'amplitude': 20.,
'frequency': 10., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.zeros(steps)
exp[:steps / 2] = (40. +
40. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2))))
exp[steps / 2:] = (40. + 20. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2)) + offset))
plt.plot(exp, 'r')
plt.title('Rate Modulation: 40 -> 20')
'''
Simulate a `sinusoidal_gamma_generator` with a non-zero AC value
and the DC value being changed from 80 to 40 after ``t/2`` and plot
the number of spikes per second over time.
'''
plt.subplot(grid[0], grid[1], 4)
spikes = step(t, n,
{'order': 6.0, 'rate': 20.0, 'amplitude': 20.,
'frequency': 10., 'phase': 0.},
{'order': 6.0, 'rate': 50.0, 'amplitude': 50.,
'frequency': 10., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.zeros(steps)
exp[:steps / 2] = (20. + 20. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2))))
exp[steps / 2:] = (50. + 50. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2)) + offset))
plt.plot(exp, 'r')
plt.title('DC Rate and Rate Modulation: 20 -> 50')
plt.ylabel('Spikes per second')
plt.xlabel('Time [ms]')
'''
Simulate a `sinusoidal_gamma_generator` with the AC value being
changed from 0 to 40 after ``t/2`` and plot the number of spikes per
second over time.
'''
plt.subplot(grid[0], grid[1], 5)
spikes = step(t, n,
{'rate': 40.0, },
{'amplitude': 40.0, 'frequency': 20.},
seed=123, dt=1.)
plot_hist(spikes)
exp = np.zeros(steps)
exp[:steps / 2] = 40. * np.ones(steps / 2)
exp[steps / 2:] = (40. + 40. * np.sin(np.arange(0, t / 1000. * np.pi * 20,
t / 1000. * np.pi * 20. /
(steps / 2))))
plt.plot(exp, 'r')
plt.title('Rate Modulation: 0 -> 40')
plt.xlabel('Time [ms]')
'''
Simulate a `sinusoidal_gamma_generator` with a phase shift at
``t/2`` and plot the number of spikes per second over time.
'''
# Phase shift
plt.subplot(grid[0], grid[1], 6)
spikes = step(t, n,
{'order': 6.0, 'rate': 60.0, 'amplitude': 60.,
'frequency': 10., 'phase': 0.},
{'order': 6.0, 'rate': 60.0, 'amplitude': 60.,
'frequency': 10., 'phase': 180.},
seed=123, dt=1.)
plot_hist(spikes)
exp = np.zeros(steps)
exp[:steps / 2] = (60. + 60. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2))))
exp[steps / 2:] = (60. + 60. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2)) + offset + np.pi))
plt.plot(exp, 'r')
plt.title('Modulation Phase: 0 -> Pi')
plt.xlabel('Time [ms]')
| gpl-2.0 |
blueburningcoder/nupic | src/nupic/math/roc_utils.py | 49 | 8308 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utility functions to compute ROC (Receiver Operator Characteristic) curves
and AUC (Area Under the Curve).
The ROCCurve() and AreaUnderCurve() functions are based on the roc_curve()
and auc() functions found in metrics.py module of scikit-learn
(http://scikit-learn.org/stable/). Scikit-learn has a BSD license (3 clause).
Following is the original license/credits statement from the top of the
metrics.py file:
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD Style.
"""
import numpy as np
def ROCCurve(y_true, y_score):
"""compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
true binary labels
y_score : array, shape = [n_samples]
target scores, can either be probability estimates of
the positive class, confidence values, or binary decisions.
Returns
-------
fpr : array, shape = [>2]
False Positive Rates
tpr : array, shape = [>2]
True Positive Rates
thresholds : array, shape = [>2]
Thresholds on y_score used to compute fpr and tpr
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
References
----------
http://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
y_true = np.ravel(y_true)
classes = np.unique(y_true)
# ROC only for binary classification
if classes.shape[0] != 2:
raise ValueError("ROC is defined for binary classification only")
y_score = np.ravel(y_score)
n_pos = float(np.sum(y_true == classes[1])) # nb of true positive
n_neg = float(np.sum(y_true == classes[0])) # nb of true negative
thresholds = np.unique(y_score)
neg_value, pos_value = classes[0], classes[1]
tpr = np.empty(thresholds.size, dtype=np.float) # True positive rate
fpr = np.empty(thresholds.size, dtype=np.float) # False positive rate
# Build tpr/fpr vector
current_pos_count = current_neg_count = sum_pos = sum_neg = idx = 0
signal = np.c_[y_score, y_true]
sorted_signal = signal[signal[:, 0].argsort(), :][::-1]
last_score = sorted_signal[0][0]
for score, value in sorted_signal:
if score == last_score:
if value == pos_value:
current_pos_count += 1
else:
current_neg_count += 1
else:
tpr[idx] = (sum_pos + current_pos_count) / n_pos
fpr[idx] = (sum_neg + current_neg_count) / n_neg
sum_pos += current_pos_count
sum_neg += current_neg_count
current_pos_count = 1 if value == pos_value else 0
current_neg_count = 1 if value == neg_value else 0
idx += 1
last_score = score
else:
tpr[-1] = (sum_pos + current_pos_count) / n_pos
fpr[-1] = (sum_neg + current_neg_count) / n_neg
# hard decisions, add (0,0)
if fpr.shape[0] == 2:
fpr = np.array([0.0, fpr[0], fpr[1]])
tpr = np.array([0.0, tpr[0], tpr[1]])
# trivial decisions, add (0,0) and (1,1)
elif fpr.shape[0] == 1:
fpr = np.array([0.0, fpr[0], 1.0])
tpr = np.array([0.0, tpr[0], 1.0])
return fpr, tpr, thresholds
def AreaUnderCurve(x, y):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
Parameters
----------
x : array, shape = [n]
x coordinates
y : array, shape = [n]
y coordinates
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)
>>> metrics.auc(fpr, tpr)
0.75
"""
#x, y = check_arrays(x, y)
if x.shape[0] != y.shape[0]:
raise ValueError('x and y should have the same shape'
' to compute area under curve,'
' but x.shape = %s and y.shape = %s.'
% (x.shape, y.shape))
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
# reorder the data points according to the x axis
order = np.argsort(x)
x = x[order]
y = y[order]
h = np.diff(x)
area = np.sum(h * (y[1:] + y[:-1])) / 2.0
return area
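# Quick illustrative check (added, not part of the original module): since the
# implementation is the trapezoidal rule, it should match numpy.trapz, e.g.
#   x = np.array([0.0, 0.5, 1.0]); y = np.array([0.0, 0.75, 1.0])
#   np.isclose(AreaUnderCurve(x, y), np.trapz(y, x))   # both evaluate to 0.625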
def _printNPArray(x, precision=2):
format = "%%.%df" % (precision)
for elem in x:
print format % (elem),
print
def _test():
"""
This is a toy example, to show the basic functionality:
The dataset is:
actual prediction
-------------------------
0 0.1
0 0.4
1 0.5
1 0.3
1 0.45
Some ROC terminology:
A True Positive (TP) is when we predict TRUE and the actual value is 1.
A False Positive (FP) is when we predict TRUE, but the actual value is 0.
The True Positive Rate (TPR) is TP/P, where P is the total number of actual
positives (3 in this example, the last 3 samples).
The False Positive Rate (FPR) is FP/N, where N is the total number of actual
negatives (2 in this example, the first 2 samples)
Here are the classifications at various choices for the threshold. The
prediction is TRUE if the predicted value is >= threshold and FALSE otherwise.
actual pred 0.50 0.45 0.40 0.30 0.10
---------------------------------------------------------
0 0.1 0 0 0 0 1
0 0.4 0 0 1 1 1
1 0.5 1 1 1 1 1
1 0.3 0 0 0 1 1
1 0.45 0 1 1 1 1
TruePos(TP) 1 2 2 3 3
FalsePos(FP) 0 0 1 1 2
TruePosRate(TPR) 1/3 2/3 2/3 3/3 3/3
FalsePosRate(FPR) 0/2 0/2 1/2 1/2 2/2
The ROC curve is a plot of FPR on the x-axis and TPR on the y-axis. Basically,
one can pick any operating point along this curve to run, the operating point
determined by which threshold you want to use. By changing the threshold, you
tradeoff TP's for FPs.
The more area under this curve, the better the classification algorithm is.
The AreaUnderCurve() function can be used to compute the area under this
curve.
"""
yTrue = np.array([0, 0, 1, 1, 1])
yScore = np.array([0.1, 0.4, 0.5, 0.3, 0.45])
(fpr, tpr, thresholds) = ROCCurve(yTrue, yScore)
print "Actual: ",
_printNPArray(yTrue)
print "Predicted: ",
_printNPArray(yScore)
print
print "Thresholds:",
_printNPArray(thresholds[::-1])
print "FPR(x): ",
_printNPArray(fpr)
print "TPR(y): ",
_printNPArray(tpr)
print
area = AreaUnderCurve(fpr, tpr)
print "AUC: ", area
if __name__=='__main__':
_test()
| agpl-3.0 |
ferchault/iago | tests/unit/test_PandasPatch.py | 1 | 1765 | # system modules
from unittest import TestCase
# third-party modules
import pandas as pd
# custom modules
import iago
class TestPandasPatch(TestCase):
def test_explain(self):
df = pd.DataFrame(columns='a b'.split())
self.assertEqual(df.explain('a').to_dict(), {
'Comment': {0: 'No description available.'},
'Name': {0: 'a'},
'Unit': {0: 'No unit available.'}})
self.assertEqual(df.explain('c').to_dict(), {
'Comment': {0: 'No description available.'},
'Name': {0: 'c'},
'Unit': {0: 'No unit available.'}})
self.assertEqual(df.explain('a b'.split()).to_dict(), {
'Comment': {0: 'No description available.', 1: 'No description available.'},
'Name': {0: 'a', 1: 'b'},
'Unit': {0: 'No unit available.', 1: 'No unit available.'}})
self.assertEqual(df.explain().to_dict(), {
'Comment': {0: 'No description available.', 1: 'No description available.'},
'Name': {0: 'a', 1: 'b'},
'Unit': {0: 'No unit available.', 1: 'No unit available.'}})
df._iago_units['a'] = 'bar'
df._iago_comments['a'] = 'foo'
self.assertEqual(df.explain('a').to_dict(), {
'Comment': {0: 'foo'},
'Name': {0: 'a'},
'Unit': {0: 'bar'}})
df._iago_units['a'] = None
self.assertEqual(df.explain('a').to_dict(), {
'Comment': {0: 'foo'},
'Name': {0: 'a'},
'Unit': {0: 'Dimensionless.'}})
def test_annotations_to_dict(self):
df = pd.DataFrame(columns='a b'.split())
df._iago_units['a'] = 'bar'
df._iago_comments['a'] = 'foo'
self.assertEqual(df.annotations_to_dict(), {'a': ('foo', 'bar')})
def test_instance_attributes(self):
df1 = pd.DataFrame(columns='a b'.split())
df1._iago_units['a'] = 'test'
df2 = pd.DataFrame(columns='a b'.split())
self.assertTrue(df1.explain().to_dict() != df2.explain().to_dict())
| mit |
dolejarz/engsci_capstone_transport | python/temporal_analysis/subway_trips.py | 1 | 1027 | import pandas as pd
import matplotlib.pyplot as plt
df_subway = pd.read_csv('/Users/dolejarz/Documents/Engineering Science/4th Year/CIV455/github/engsci_capstone_transport/gis/subway_buffer/trips_in_buffer.csv')
df_all = pd.read_csv('/Users/dolejarz/Documents/Engineering Science/4th Year/CIV455/github/engsci_capstone_transport/csv/Trips_Oct_31_2017.csv')
df_subway['tx'] = pd.to_datetime(df_subway['Date'])
df_all['tx'] = pd.to_datetime(df_all['Date'])
df_subway['start_hour'] = df_subway['tx'].dt.hour
df_all['start_hour'] = df_all['tx'].dt.hour
pt_subway = pd.pivot_table(df_subway,index='start_hour',aggfunc='count')
pt_all = pd.pivot_table(df_all,index='start_hour',aggfunc='count')
#series of hourly distribution of trips as a percent of daily total
subway_percent = pt_subway['tx']/float(pt_subway['tx'].sum())
all_percent = pt_all['tx']/float(pt_all['tx'].sum())
plt.figure(figsize=(12,8), dpi=300)
plt.plot(range(24), subway_percent)
plt.plot(range(24), all_percent,color='r')
plt.savefig('subway_vs_all.png')
| mit |
bsipocz/statsmodels | statsmodels/base/data.py | 10 | 21796 | """
Base tools for handling various kinds of data structures, attaching metadata to
results, and doing data cleaning
"""
from statsmodels.compat.python import reduce, iteritems, lmap, zip, range
from statsmodels.compat.numpy import np_matrix_rank
import numpy as np
from pandas import DataFrame, Series, TimeSeries, isnull
from statsmodels.tools.decorators import (resettable_cache, cache_readonly,
cache_writable)
import statsmodels.tools.data as data_util
from statsmodels.tools.sm_exceptions import MissingDataError
def _asarray_2dcolumns(x):
if np.asarray(x).ndim > 1 and np.asarray(x).squeeze().ndim == 1:
return
def _asarray_2d_null_rows(x):
"""
Makes sure input is an array and is 2d. Makes sure output is 2d. True
indicates a null in the rows of 2d x.
"""
#Have to have the asarrays because isnull doesn't account for array-like
#input
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
return np.any(isnull(x), axis=1)[:, None]
def _nan_rows(*arrs):
"""
Returns a boolean array which is True where any of the rows in any
of the _2d_ arrays in arrs are NaNs. Inputs can be any mixture of Series,
DataFrames or array-like.
"""
if len(arrs) == 1:
arrs += ([[False]],)
def _nan_row_maybe_two_inputs(x, y):
# check for dtype bc dataframe has dtypes
x_is_boolean_array = hasattr(x, 'dtype') and x.dtype == bool and x
return np.logical_or(_asarray_2d_null_rows(x),
(x_is_boolean_array | _asarray_2d_null_rows(y)))
return reduce(_nan_row_maybe_two_inputs, arrs).squeeze()
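# Illustrative example (added comment, not in the original source):
#   _nan_rows(np.array([[1.0, np.nan], [2.0, 3.0]]))
# evaluates to array([ True, False]): only the first row contains a NaN.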
class ModelData(object):
"""
Class responsible for handling input data and extracting metadata into the
appropriate form
"""
_param_names = None
def __init__(self, endog, exog=None, missing='none', hasconst=None,
**kwargs):
if 'design_info' in kwargs:
self.design_info = kwargs.pop('design_info')
if 'formula' in kwargs:
self.formula = kwargs.pop('formula')
if missing != 'none':
arrays, nan_idx = self.handle_missing(endog, exog, missing,
**kwargs)
self.missing_row_idx = nan_idx
self.__dict__.update(arrays) # attach all the data arrays
self.orig_endog = self.endog
self.orig_exog = self.exog
self.endog, self.exog = self._convert_endog_exog(self.endog,
self.exog)
else:
self.__dict__.update(kwargs) # attach the extra arrays anyway
self.orig_endog = endog
self.orig_exog = exog
self.endog, self.exog = self._convert_endog_exog(endog, exog)
# this has side-effects, attaches k_constant and const_idx
self._handle_constant(hasconst)
self._check_integrity()
self._cache = resettable_cache()
def __getstate__(self):
from copy import copy
d = copy(self.__dict__)
if "design_info" in d:
del d["design_info"]
d["restore_design_info"] = True
return d
def __setstate__(self, d):
if "restore_design_info" in d:
# NOTE: there may be a more performant way to do this
from patsy import dmatrices, PatsyError
exc = []
try:
data = d['frame']
except KeyError:
data = d['orig_endog'].join(d['orig_exog'])
for depth in [2, 3, 1, 0, 4]: # sequence is a guess where to likely find it
try:
_, design = dmatrices(d['formula'], data, eval_env=depth,
return_type='dataframe')
break
except (NameError, PatsyError) as e:
print('not in depth %d' % depth)
exc.append(e) # why do I need a reference from outside except block
pass
else:
raise exc[-1]
self.design_info = design.design_info
del d["restore_design_info"]
self.__dict__.update(d)
def _handle_constant(self, hasconst):
if hasconst is not None:
if hasconst:
self.k_constant = 1
self.const_idx = None
else:
self.k_constant = 0
self.const_idx = None
elif self.exog is None:
self.const_idx = None
self.k_constant = 0
else:
# detect where the constant is
check_implicit = False
const_idx = np.where(self.exog.ptp(axis=0) == 0)[0].squeeze()
self.k_constant = const_idx.size
if self.k_constant == 1:
if self.exog[:, const_idx].mean() != 0:
self.const_idx = const_idx
else:
# we only have a zero column and no other constant
check_implicit = True
elif self.k_constant > 1:
# we have more than one constant column
# look for ones
values = [] # keep values if we need != 0
for idx in const_idx:
value = self.exog[:, idx].mean()
if value == 1:
self.k_constant = 1
self.const_idx = idx
break
values.append(value)
else:
# we didn't break, no column of ones
pos = (np.array(values) != 0)
if pos.any():
# take the first nonzero column
self.k_constant = 1
self.const_idx = const_idx[pos.argmax()]
else:
# only zero columns
check_implicit = True
elif self.k_constant == 0:
check_implicit = True
else:
# shouldn't be here
pass
if check_implicit:
# look for implicit constant
# Compute rank of augmented matrix
augmented_exog = np.column_stack(
(np.ones(self.exog.shape[0]), self.exog))
rank_augm = np_matrix_rank(augmented_exog)
rank_orig = np_matrix_rank(self.exog)
self.k_constant = int(rank_orig == rank_augm)
self.const_idx = None
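# Added note: the rank comparison above detects an *implicit* constant, the
# classic case being a full set of dummy/indicator columns that sum to one;
# appending an explicit column of ones then leaves the rank unchanged, so
# k_constant is set to 1 even though no single constant column (const_idx) exists.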
@classmethod
def _drop_nans(cls, x, nan_mask):
return x[nan_mask]
@classmethod
def _drop_nans_2d(cls, x, nan_mask):
return x[nan_mask][:, nan_mask]
@classmethod
def handle_missing(cls, endog, exog, missing, **kwargs):
"""
This returns a dictionary with keys endog, exog and the keys of
kwargs. It preserves Nones.
"""
none_array_names = []
# patsy's already dropped NaNs in y/X
missing_idx = kwargs.pop('missing_idx', None)
if missing_idx is not None:
# y, X already handled by patsy. add back in later.
combined = ()
combined_names = []
if exog is None:
none_array_names += ['exog']
elif exog is not None:
combined = (endog, exog)
combined_names = ['endog', 'exog']
else:
combined = (endog,)
combined_names = ['endog']
none_array_names += ['exog']
# deal with other arrays
combined_2d = ()
combined_2d_names = []
if len(kwargs):
for key, value_array in iteritems(kwargs):
if value_array is None or value_array.ndim == 0:
none_array_names += [key]
continue
# grab 1d arrays
if value_array.ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
elif value_array.squeeze().ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
# grab 2d arrays that are _assumed_ to be symmetric
elif value_array.ndim == 2:
combined_2d += (np.asarray(value_array),)
combined_2d_names += [key]
else:
raise ValueError("Arrays with more than 2 dimensions "
"aren't yet handled")
if missing_idx is not None:
nan_mask = missing_idx
updated_row_mask = None
if combined: # there were extra arrays not handled by patsy
combined_nans = _nan_rows(*combined)
if combined_nans.shape[0] != nan_mask.shape[0]:
raise ValueError("Shape mismatch between endog/exog "
"and extra arrays given to model.")
# for going back and updated endog/exog
updated_row_mask = combined_nans[~nan_mask]
nan_mask |= combined_nans # for updating extra arrays only
if combined_2d:
combined_2d_nans = _nan_rows(combined_2d)
if combined_2d_nans.shape[0] != nan_mask.shape[0]:
raise ValueError("Shape mismatch between endog/exog "
"and extra 2d arrays given to model.")
if updated_row_mask is not None:
updated_row_mask |= combined_2d_nans[~nan_mask]
else:
updated_row_mask = combined_2d_nans[~nan_mask]
nan_mask |= combined_2d_nans
else:
nan_mask = _nan_rows(*combined)
if combined_2d:
nan_mask = _nan_rows(*(nan_mask[:, None],) + combined_2d)
if not np.any(nan_mask): # no missing don't do anything
combined = dict(zip(combined_names, combined))
if combined_2d:
combined.update(dict(zip(combined_2d_names, combined_2d)))
if none_array_names:
combined.update(dict(zip(none_array_names,
[None] * len(none_array_names))))
if missing_idx is not None:
combined.update({'endog': endog})
if exog is not None:
combined.update({'exog': exog})
return combined, []
elif missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
elif missing == 'drop':
nan_mask = ~nan_mask
drop_nans = lambda x: cls._drop_nans(x, nan_mask)
drop_nans_2d = lambda x: cls._drop_nans_2d(x, nan_mask)
combined = dict(zip(combined_names, lmap(drop_nans, combined)))
if missing_idx is not None:
if updated_row_mask is not None:
updated_row_mask = ~updated_row_mask
# update endog/exog with this new information
endog = cls._drop_nans(endog, updated_row_mask)
if exog is not None:
exog = cls._drop_nans(exog, updated_row_mask)
combined.update({'endog': endog})
if exog is not None:
combined.update({'exog': exog})
if combined_2d:
combined.update(dict(zip(combined_2d_names,
lmap(drop_nans_2d, combined_2d))))
if none_array_names:
combined.update(dict(zip(none_array_names,
[None] * len(none_array_names))))
return combined, np.where(~nan_mask)[0].tolist()
else:
raise ValueError("missing option %s not understood" % missing)
def _convert_endog_exog(self, endog, exog):
# for consistent outputs if endog is (n,1)
yarr = self._get_yarr(endog)
xarr = None
if exog is not None:
xarr = self._get_xarr(exog)
if xarr.ndim == 1:
xarr = xarr[:, None]
if xarr.ndim != 2:
raise ValueError("exog is not 1d or 2d")
return yarr, xarr
@cache_writable()
def ynames(self):
endog = self.orig_endog
ynames = self._get_names(endog)
if not ynames:
ynames = _make_endog_names(self.endog)
if len(ynames) == 1:
return ynames[0]
else:
return list(ynames)
@cache_writable()
def xnames(self):
exog = self.orig_exog
if exog is not None:
xnames = self._get_names(exog)
if not xnames:
xnames = _make_exog_names(self.exog)
return list(xnames)
return None
@property
def param_names(self):
# for handling names of 'extra' parameters in summary, etc.
return self._param_names or self.xnames
@param_names.setter
def param_names(self, values):
self._param_names = values
@cache_readonly
def row_labels(self):
exog = self.orig_exog
if exog is not None:
row_labels = self._get_row_labels(exog)
else:
endog = self.orig_endog
row_labels = self._get_row_labels(endog)
return row_labels
def _get_row_labels(self, arr):
return None
def _get_names(self, arr):
if isinstance(arr, DataFrame):
return list(arr.columns)
elif isinstance(arr, Series):
if arr.name:
return [arr.name]
else:
return
else:
try:
return arr.dtype.names
except AttributeError:
pass
return None
def _get_yarr(self, endog):
if data_util._is_structured_ndarray(endog):
endog = data_util.struct_to_ndarray(endog)
endog = np.asarray(endog)
if len(endog) == 1: # never squeeze to a scalar
if endog.ndim == 1:
return endog
elif endog.ndim > 1:
return np.asarray([endog.squeeze()])
return endog.squeeze()
def _get_xarr(self, exog):
if data_util._is_structured_ndarray(exog):
exog = data_util.struct_to_ndarray(exog)
return np.asarray(exog)
def _check_integrity(self):
if self.exog is not None:
if len(self.exog) != len(self.endog):
raise ValueError("endog and exog matrices are different sizes")
def wrap_output(self, obj, how='columns', names=None):
if how == 'columns':
return self.attach_columns(obj)
elif how == 'rows':
return self.attach_rows(obj)
elif how == 'cov':
return self.attach_cov(obj)
elif how == 'dates':
return self.attach_dates(obj)
elif how == 'columns_eq':
return self.attach_columns_eq(obj)
elif how == 'cov_eq':
return self.attach_cov_eq(obj)
elif how == 'generic_columns':
return self.attach_generic_columns(obj, names)
elif how == 'generic_columns_2d':
return self.attach_generic_columns_2d(obj, names)
else:
return obj
def attach_columns(self, result):
return result
def attach_columns_eq(self, result):
return result
def attach_cov(self, result):
return result
def attach_cov_eq(self, result):
return result
def attach_rows(self, result):
return result
def attach_dates(self, result):
return result
def attach_generic_columns(self, result, *args, **kwargs):
return result
def attach_generic_columns_2d(self, result, *args, **kwargs):
return result
class PatsyData(ModelData):
def _get_names(self, arr):
return arr.design_info.column_names
class PandasData(ModelData):
"""
Data handling class which knows how to reattach pandas metadata to model
results
"""
def _convert_endog_exog(self, endog, exog=None):
#TODO: remove this when we handle dtype systematically
endog = np.asarray(endog)
exog = exog if exog is None else np.asarray(exog)
if endog.dtype == object or exog is not None and exog.dtype == object:
raise ValueError("Pandas data cast to numpy dtype of object. "
"Check input data with np.asarray(data).")
return super(PandasData, self)._convert_endog_exog(endog, exog)
@classmethod
def _drop_nans(cls, x, nan_mask):
if hasattr(x, 'ix'):
return x.ix[nan_mask]
else: # extra arguments could be plain ndarrays
return super(PandasData, cls)._drop_nans(x, nan_mask)
@classmethod
def _drop_nans_2d(cls, x, nan_mask):
if hasattr(x, 'ix'):
return x.ix[nan_mask].ix[:, nan_mask]
else: # extra arguments could be plain ndarrays
return super(PandasData, cls)._drop_nans_2d(x, nan_mask)
def _check_integrity(self):
endog, exog = self.orig_endog, self.orig_exog
# exog can be None and we could be upcasting one or the other
if (exog is not None and
(hasattr(endog, 'index') and hasattr(exog, 'index')) and
not self.orig_endog.index.equals(self.orig_exog.index)):
raise ValueError("The indices for endog and exog are not aligned")
super(PandasData, self)._check_integrity()
def _get_row_labels(self, arr):
try:
return arr.index
except AttributeError:
# if we've gotten here it's because endog is pandas and
# exog is not, so just return the row labels from endog
return self.orig_endog.index
def attach_generic_columns(self, result, names):
# get the attribute to use
column_names = getattr(self, names, None)
return Series(result, index=column_names)
def attach_generic_columns_2d(self, result, rownames, colnames=None):
colnames = colnames or rownames
rownames = getattr(self, rownames, None)
colnames = getattr(self, colnames, None)
return DataFrame(result, index=rownames, columns=colnames)
def attach_columns(self, result):
# this can either be a 1d array or a scalar
# don't squeeze because it might be a 2d row array
# if it needs a squeeze, the bug is elsewhere
if result.ndim <= 1:
return Series(result, index=self.param_names)
else: # for e.g., confidence intervals
return DataFrame(result, index=self.param_names)
def attach_columns_eq(self, result):
return DataFrame(result, index=self.xnames, columns=self.ynames)
def attach_cov(self, result):
return DataFrame(result, index=self.param_names,
columns=self.param_names)
def attach_cov_eq(self, result):
return DataFrame(result, index=self.ynames, columns=self.ynames)
def attach_rows(self, result):
# assumes if len(row_labels) > len(result) it's bc it was truncated
# at the front, for AR lags, for example
if result.squeeze().ndim == 1:
return Series(result, index=self.row_labels[-len(result):])
else: # this is for VAR results, may not be general enough
return DataFrame(result, index=self.row_labels[-len(result):],
columns=self.ynames)
def attach_dates(self, result):
return TimeSeries(result, index=self.predict_dates)
def _make_endog_names(endog):
if endog.ndim == 1 or endog.shape[1] == 1:
ynames = ['y']
else: # for VAR
ynames = ['y%d' % (i+1) for i in range(endog.shape[1])]
return ynames
def _make_exog_names(exog):
exog_var = exog.var(0)
if (exog_var == 0).any():
# assumes one constant in first or last position
# avoid exception if more than one constant
const_idx = exog_var.argmin()
exog_names = ['x%d' % i for i in range(1, exog.shape[1])]
exog_names.insert(const_idx, 'const')
else:
exog_names = ['x%d' % i for i in range(1, exog.shape[1]+1)]
return exog_names
def handle_missing(endog, exog=None, missing='none', **kwargs):
klass = handle_data_class_factory(endog, exog)
if missing == 'none':
ret_dict = dict(endog=endog, exog=exog)
ret_dict.update(kwargs)
return ret_dict, None
return klass.handle_missing(endog, exog, missing=missing, **kwargs)
def handle_data_class_factory(endog, exog):
"""
Given inputs
"""
if data_util._is_using_ndarray_type(endog, exog):
klass = ModelData
elif data_util._is_using_pandas(endog, exog):
klass = PandasData
elif data_util._is_using_patsy(endog, exog):
klass = PatsyData
# keep this check last
elif data_util._is_using_ndarray(endog, exog):
klass = ModelData
else:
raise ValueError('unrecognized data structures: %s / %s' %
(type(endog), type(exog)))
return klass
def handle_data(endog, exog, missing='none', hasconst=None, **kwargs):
# deal with lists and tuples up-front
if isinstance(endog, (list, tuple)):
endog = np.asarray(endog)
if isinstance(exog, (list, tuple)):
exog = np.asarray(exog)
klass = handle_data_class_factory(endog, exog)
return klass(endog, exog=exog, missing=missing, hasconst=hasconst,
**kwargs)
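# --- added usage sketch (not part of the original module) -------------------
# Minimal illustration of how handle_data dispatches pandas input to
# PandasData; the toy Series/DataFrame below are assumptions made purely for
# demonstration.
def _example_handle_data_usage():
    import numpy as np
    import pandas as pd
    endog = pd.Series(np.random.randn(100), name='y')
    exog = pd.DataFrame(np.random.randn(100, 2), columns=['x1', 'x2'])
    # pandas input should route through handle_data_class_factory to PandasData
    data = handle_data(endog, exog, missing='none')
    return data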
| bsd-3-clause |
wubr2000/zipline | zipline/utils/factory.py | 17 | 11554 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Factory functions to prepare useful data.
"""
import pytz
import random
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.sources import (SpecificEquityTrades,
DataFrameSource,
DataPanelSource)
from zipline.finance.trading import SimulationParameters
from zipline.finance import trading
from zipline.sources.test_source import create_trade
# For backwards compatibility
from zipline.data.loader import (load_from_yahoo,
load_bars_from_yahoo)
__all__ = ['load_from_yahoo', 'load_bars_from_yahoo']
def create_simulation_parameters(year=2006, start=None, end=None,
capital_base=float("1.0e5"),
num_days=None, load=None,
data_frequency='daily',
emission_rate='daily'):
"""Construct a complete environment with reasonable defaults"""
if start is None:
start = datetime(year, 1, 1, tzinfo=pytz.utc)
if end is None:
if num_days:
trading.environment = trading.TradingEnvironment(load=load)
start_index = trading.environment.trading_days.searchsorted(
start)
end = trading.environment.trading_days[start_index + num_days - 1]
else:
end = datetime(year, 12, 31, tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=capital_base,
data_frequency=data_frequency,
emission_rate=emission_rate,
)
return sim_params
def create_random_simulation_parameters():
trading.environment = trading.TradingEnvironment()
treasury_curves = trading.environment.treasury_curves
for n in range(100):
random_index = random.randint(
0,
len(treasury_curves) - 1
)
start_dt = treasury_curves.index[random_index]
end_dt = start_dt + timedelta(days=365)
now = datetime.utcnow().replace(tzinfo=pytz.utc)
if end_dt <= now:
break
assert end_dt <= now, """
failed to find a suitable daterange after 100 attempts. please double
check treasury and benchmark data in findb, and re-run the test."""
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt
)
return sim_params, start_dt, end_dt
def get_next_trading_dt(current, interval):
next_dt = pd.Timestamp(current).tz_convert(trading.environment.exchange_tz)
while True:
# Convert timestamp to naive before adding day, otherwise the when
# stepping over EDT an hour is added.
next_dt = pd.Timestamp(next_dt.replace(tzinfo=None))
next_dt = next_dt + interval
next_dt = pd.Timestamp(next_dt, tz=trading.environment.exchange_tz)
next_dt_utc = next_dt.tz_convert('UTC')
if trading.environment.is_market_hours(next_dt_utc):
break
next_dt = next_dt_utc.tz_convert(trading.environment.exchange_tz)
return next_dt_utc
def create_trade_history(sid, prices, amounts, interval, sim_params,
source_id="test_factory"):
trades = []
current = sim_params.first_open
trading.environment.update_asset_finder(identifiers=[sid])
oneday = timedelta(days=1)
use_midnight = interval >= oneday
for price, amount in zip(prices, amounts):
if use_midnight:
trade_dt = current.replace(hour=0, minute=0)
else:
trade_dt = current
trade = create_trade(sid, price, amount, trade_dt, source_id)
trades.append(trade)
current = get_next_trading_dt(current, interval)
assert len(trades) == len(prices)
return trades
def create_dividend(sid, payment, declared_date, ex_date, pay_date):
div = Event({
'sid': sid,
'gross_amount': payment,
'net_amount': payment,
'payment_sid': None,
'ratio': None,
'declared_date': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
return div
def create_stock_dividend(sid, payment_sid, ratio, declared_date,
ex_date, pay_date):
return Event({
'sid': sid,
'payment_sid': payment_sid,
'ratio': ratio,
'net_amount': None,
'gross_amount': None,
'dt': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
def create_split(sid, ratio, date):
return Event({
'sid': sid,
'ratio': ratio,
'dt': date.replace(hour=0, minute=0, second=0, microsecond=0),
'type': DATASOURCE_TYPE.SPLIT,
'source_id': 'MockSplitSource'
})
def create_txn(sid, price, amount, datetime):
txn = Event({
'sid': sid,
'amount': amount,
'dt': datetime,
'price': price,
'type': DATASOURCE_TYPE.TRANSACTION,
'source_id': 'MockTransactionSource'
})
return txn
def create_commission(sid, value, datetime):
txn = Event({
'dt': datetime,
'type': DATASOURCE_TYPE.COMMISSION,
'cost': value,
'sid': sid,
'source_id': 'MockCommissionSource'
})
return txn
def create_txn_history(sid, priceList, amtList, interval, sim_params):
txns = []
current = sim_params.first_open
for price, amount in zip(priceList, amtList):
current = get_next_trading_dt(current, interval)
txns.append(create_txn(sid, price, amount, current))
current = current + interval
return txns
def create_returns_from_range(sim_params):
return pd.Series(index=sim_params.trading_days,
data=np.random.rand(len(sim_params.trading_days)))
def create_returns_from_list(returns, sim_params):
return pd.Series(index=sim_params.trading_days[:len(returns)],
data=returns)
def create_daily_trade_source(sids, sim_params, concurrent=False):
"""
creates trade_count trades for each sid in sids list.
first trade will be on sim_params.period_start, and daily
thereafter for each sid. Thus, two sids should result in two trades per
day.
"""
return create_trade_source(
sids,
timedelta(days=1),
sim_params,
concurrent=concurrent
)
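# --- added usage sketch (not part of zipline's factory API) -----------------
# Hedged illustration of wiring the helpers above together: build default
# simulation parameters and a daily trade source for two sids. The sid values
# and the 20-day window are assumptions chosen only for this example.
def _example_daily_trade_source():
    sim_params = create_simulation_parameters(year=2006, num_days=20)
    return create_daily_trade_source([1, 2], sim_params)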
def create_minutely_trade_source(sids, sim_params, concurrent=False):
"""
creates trade_count trades for each sid in sids list.
first trade will be on sim_params.period_start, and every minute
thereafter for each sid. Thus, two sids should result in two trades per
minute.
"""
return create_trade_source(
sids,
timedelta(minutes=1),
sim_params,
concurrent=concurrent
)
def create_trade_source(sids, trade_time_increment, sim_params,
concurrent=False):
# If the sim_params define an end that is during market hours, that will be
# used as the end of the data source
if trading.environment.is_market_hours(sim_params.period_end):
end = sim_params.period_end
# Otherwise, the last_close after the period_end is used as the end of the
# data source
else:
end = sim_params.last_close
args = tuple()
kwargs = {
'sids': sids,
'start': sim_params.first_open,
'end': end,
'delta': trade_time_increment,
'filter': sids,
'concurrent': concurrent
}
source = SpecificEquityTrades(*args, **kwargs)
return source
def create_test_df_source(sim_params=None, bars='daily'):
if bars == 'daily':
freq = pd.datetools.BDay()
elif bars == 'minute':
freq = pd.datetools.Minute()
else:
raise ValueError('%s bars not understood.' % bars)
if sim_params:
index = sim_params.trading_days
else:
if trading.environment is None:
trading.environment = trading.TradingEnvironment()
start = pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
days = trading.environment.days_in_range(start, end)
if bars == 'daily':
index = days
if bars == 'minute':
index = pd.DatetimeIndex([], freq=freq)
for day in days:
day_index = trading.environment.market_minutes_for_day(day)
index = index.append(day_index)
x = np.arange(1, len(index) + 1)
df = pd.DataFrame(x, index=index, columns=[0])
trading.environment.update_asset_finder(identifiers=[0])
return DataFrameSource(df), df
def create_test_panel_source(sim_params=None, source_type=None):
start = sim_params.first_open \
if sim_params else pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = sim_params.last_close \
if sim_params else pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
if trading.environment is None:
trading.environment = trading.TradingEnvironment()
index = trading.environment.days_in_range(start, end)
price = np.arange(0, len(index))
volume = np.ones(len(index)) * 1000
arbitrary = np.ones(len(index))
df = pd.DataFrame({'price': price,
'volume': volume,
'arbitrary': arbitrary},
index=index)
if source_type:
source_types = np.full(len(index), source_type)
df['type'] = source_types
panel = pd.Panel.from_dict({0: df})
return DataPanelSource(panel), panel
def create_test_panel_ohlc_source(sim_params=None):
start = sim_params.first_open \
if sim_params else pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = sim_params.last_close \
if sim_params else pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
if trading.environment is None:
trading.environment = trading.TradingEnvironment()
index = trading.environment.days_in_range(start, end)
price = np.arange(0, len(index)) + 100
high = price * 1.05
low = price * 0.95
open_ = price + .1 * (price % 2 - .5)
volume = np.ones(len(index)) * 1000
arbitrary = np.ones(len(index))
df = pd.DataFrame({'price': price,
'high': high,
'low': low,
'open': open_,
'volume': volume,
'arbitrary': arbitrary},
index=index)
panel = pd.Panel.from_dict({0: df})
return DataPanelSource(panel), panel
| apache-2.0 |
EnsekiTT/ml_review | forAudio/visualization.py | 1 | 2015 | # -*- coding:utf-8 -*-
import wave
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
chunk = 2048*2
step = 1
threshold = 5000
after = 4000
before = 1000
sound1 = '../datasets/f_sound.wav'
sound2 = '../datasets/j_sound.wav'
s1_wav = wave.open(sound1, 'rb')
s2_wav = wave.open(sound2, 'rb')
s1_frames_len = s1_wav.getnframes()
s2_frames_len = s2_wav.getnframes()
s1_aryary = []
s2_aryary = []
s1_wav.setpos(chunk-step)
while s1_wav.tell() < s1_frames_len - chunk:
s1_wav.setpos(s1_wav.tell()-chunk+step)
data = s1_wav.readframes(chunk)
arya = abs(np.fromstring(data,np.int16))
if max(arya) < threshold:
continue
maxary = (0,0)
for i, m in enumerate(arya):
if maxary[1] < m:
maxary = (i,m)
if len(arya)-after > maxary[0] > before:
tempary = arya[maxary[0]-before: maxary[0]+after]
tempary = tempary - np.mean(tempary)
tempary = tempary / np.std(tempary)
s1_aryary.append(tempary)
s2_wav.setpos(chunk-step)
while s2_wav.tell() < s2_frames_len - chunk:
s2_wav.setpos(s2_wav.tell()-chunk+step)
data = s2_wav.readframes(chunk)
arya = abs(np.fromstring(data,np.int16))
if max(arya) < threshold:
continue
maxary = (0,0)
for i, m in enumerate(arya):
if maxary[1] < m:
maxary = (i,m)
if len(arya)-after > maxary[0] > before:
tempary = arya[maxary[0]-before: maxary[0]+after]
tempary = tempary - np.mean(tempary)
tempary = tempary / np.std(tempary)
s2_aryary.append(tempary)
print(len(s1_aryary))
s1_sum = sum(s1_aryary)
print(len(s2_aryary))
s1_fft_ary = [abs(np.fft.fft(i)) for i in s1_aryary]
s2_fft_ary = [abs(np.fft.fft(i)) for i in s2_aryary]
pca = KernelPCA(n_components=3,kernel='poly')
pca.whiten=True
result = pca.fit_transform(np.array(s1_fft_ary))
x = [i[0] for i in result]
y = [i[1] for i in result]
plt.scatter(x, y, color='r')
result = pca.fit_transform(np.array(s2_fft_ary))
x = [i[0] for i in result]
y = [i[1] for i in result]
plt.scatter(x, y, color='b')
plt.show() | mit |
samuelshaner/openmc | docs/source/conf.py | 1 | 7909 | # -*- coding: utf-8 -*-
#
# metasci documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 7 22:29:49 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Determine if we're on Read the Docs server
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On Read the Docs, we need to mock a few third-party modules so we don't get
# ImportErrors when building documentation
try:
from unittest.mock import MagicMock
except ImportError:
from mock import Mock as MagicMock
MOCK_MODULES = ['numpy', 'numpy.polynomial', 'numpy.polynomial.polynomial',
'h5py', 'pandas', 'opencg']
sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)
import numpy as np
np.polynomial.Polynomial = MagicMock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx_numfig',
'notebook_sphinxext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenMC'
copyright = u'2011-2016, Massachusetts Institute of Technology'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.8"
# The full version, including alpha/beta/rc tags.
release = "0.8.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
#pygments_style = 'friendly'
#pygments_style = 'bw'
#pygments_style = 'fruity'
#pygments_style = 'manni'
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = '_images/openmc_logo.png'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "OpenMC Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
app.add_stylesheet('theme_overrides.css')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'openmcdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'openmc.tex', u'OpenMC Documentation',
u'Massachusetts Institute of Technology', 'manual'),
]
latex_elements = {
'preamble': r"""
\usepackage{enumitem}
\usepackage{amsfonts}
\usepackage{amsmath}
\setlistdepth{99}
\usepackage{tikz}
\usetikzlibrary{shapes,snakes,shadows,arrows,calc,decorations.markings,patterns,fit,matrix,spy}
\usepackage{fixltx2e}
\hypersetup{bookmarksdepth=3}
\setcounter{tocdepth}{2}
\numberwithin{equation}{section}
""",
'printindex': r""
}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
#Autodocumentation Flags
#autodoc_member_order = "groupwise"
#autoclass_content = "both"
autosummary_generate = True
napoleon_use_ivar = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'matplotlib': ('http://matplotlib.org/', None)
}
| mit |
knossos-project/knossos-python-tools | knossos_utils/skeleton_plotting.py | 3 | 10841 | ################################################################################
# This file provides a functions and classes for working with synapse annotations.
# and writing raw and overlay data.
#
# (C) Copyright 2017
# Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V.
#
# skeleton_plotting.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 of
# the License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#
################################################################################
"""
Class and function definitions that allow the plotting of SkeletonAnnotation objects and Synapse objects.
"""
try:
import mayavi.mlab as mlab
except:
print("mayavi not installed")
import numpy as np
import matplotlib as mplt
import random
def add_spheres_to_mayavi_window(sphere_coords,
radii,
color = (0.0, 1.0, 0.0, 1.0),
resolution = 20.,
mode='sphere'):
'''
Adds spheres to the current mayavi window.
'''
### DISCLAIMER: IT IS NOT CLEAR WHETHER THE DEFINED
### SPHERE SIZE IS A RADIUS OR DIAMETER OR SOME MYSTERIOUS "SIZE";
### The documentation is not conclusive
try:
_ = (e for e in radii)
except TypeError:
        # one radius per sphere (sphere_coords is a sequence of xyz triples)
        radii = np.ones(len(sphere_coords)) * radii
coords = np.array(sphere_coords)
sc = np.hsplit(coords, 3)
try:
x = [el[0] for el in sc[0].tolist()]
y = [el[0] for el in sc[1].tolist()]
z = [el[0] for el in sc[2].tolist()]
except:
x = [el for el in sc[0].tolist()]
y = [el for el in sc[1].tolist()]
z = [el for el in sc[2].tolist()]
#raise()
mlab.points3d(x, y, z, radii, color = color[0:3],
scale_factor = 1.0, resolution = 20, opacity = color[3],
mode=mode)
return
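# --- added usage sketch (not part of the original module) -------------------
# Hedged example of add_spheres_to_mayavi_window: three made-up sphere centres
# (in scaled dataset coordinates) with explicit per-sphere radii. Assumes a
# mayavi figure is already open.
def _example_add_spheres():
    coords = [[1000., 1000., 1000.],
              [2000., 1500., 1200.],
              [1500., 2500., 1800.]]
    add_spheres_to_mayavi_window(coords, radii=[250., 250., 250.],
                                 color=(1.0, 0.0, 0.0, 0.8))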
def add_synapses_to_mayavi_window(syns,
synapse_location = 'pre',
color=(0.0, 1.0, 0.0, 1.0),
diameter_scale = 1.,
all_same_size = 0.):
'''
Adds human generated synapse objects to the current mayavi window.
Synapses are rendered as spheres, with syn.AZlen used as diameter.
Parameters
----------
    syns : Iterable of Synapse instances
E.g. a list of synapse lists, where every list contains the synapses
extracted from the same annotation.
synapse_location : str
Either 'pre', 'post', 'pre_post_average' or 'az_average'.
color : tuple
rgba values between 0 and 1
diameter_scale : float
scaling factor to apply to the az_len (interpreted as diameter)
to radius conversion; this can be helpful for small synapses
'''
if synapse_location == 'pre':
coords = [syn.preNodeCoord_scaled for syn in syns]
elif synapse_location == 'post':
coords = [syn.postNodeCoord_scaled for syn in syns]
elif synapse_location == 'pre_post_average':
coords = [syn.avgPrePostCoord_scaled for syn in syns]
elif synapse_location == 'az_average':
coords = [syn.az_center_of_mass for syn in syns]
else:
raise Exception('Unsupported synapse_location given: '
+ synapse_location)
if not all_same_size > 0.:
radii = [syn.az_len for syn in syns]
radii = (np.array(radii) / 2.) * diameter_scale
else:
radii = [all_same_size] * len(syns)
add_spheres_to_mayavi_window(coords, radii, color)
return
def get_different_rgba_colors(num_colors,
rgb_only = False,
set_alpha = 1.0):
"""
Parameters
----------
num_colors : int
Number of randomly picked colors to return (not necessarily unique)
set_alpha : float
alpha value to add to the rgb colors
Returns
list of random rgba colors
"""
# create matplotlib color converter object
cc_mplt = mplt.colors.ColorConverter()
# this is a bit ugly, the matplotlib converter actually supports all web
# colors, see eg http://www.w3schools.com/html/html_colornames.asp Here I
# just handpicked a few ones, would be better to make all of them
# available programmatically, or to actually implement a proper
# perception-based color selection module
hand_picked_colors = ['LightPink',
'DeepPink',
'Crimson',
'DarkRed',
'OrangeRed',
'DarkOrange',
'Yellow',
'DarkKhaki',
'MediumTurquoise',
'MediumBlue',
'LightSkyBlue',
'Magenta',
'Thistle',
'DarkOliveGreen',
'MediumSpringGreen']
colors = []
    for _ in range(num_colors):
        if rgb_only:
            colors.append(cc_mplt.to_rgb(random.choice(hand_picked_colors)))
        else:
            # honor the requested alpha value instead of the default of 1.0
            colors.append(cc_mplt.to_rgba(random.choice(hand_picked_colors),
                                          alpha=set_alpha))
return colors
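# --- added usage sketch (not part of the original module) -------------------
# Hedged example: draw five random rgba colors, e.g. for coloring several
# annotations; the count and alpha value are arbitrary choices.
def _example_random_colors():
    return get_different_rgba_colors(5, set_alpha=0.8)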
def visualize_anno_with_synapses(anno, syns):
"""
Visualizes an annotation together with synapses. If syns is an iterable
of iterables of Synapse objects, the iterables will be given an
inidividual color (useful to look at redundant synapse annotations on
the same annotation)
Parameters
----------
    anno : SkeletonAnnotation instance (or list of annotations)
        The annotation whose skeleton is rendered together with the synapses.
syns : iterable or iterable of iterables of Synapse objects
"""
visualize_annotation(anno)
if is_iterable_of_iterables(syns):
for same_syns in syns:
            # give every synapse set its own color; an empty tuple is not a
            # valid rgba color
            add_synapses_to_mayavi_window(same_syns,
                                          color=get_different_rgba_colors(1)[0])
else:
add_synapses_to_mayavi_window(syns, color = (0.0, 0.0, 1.0, 0.5))
return
def is_iterable_of_iterables(item):
try:
t = (e for e in item)
tt = (e for e in t)
return True
except TypeError:
return False
def add_anno_to_mayavi_window(anno,
node_scaling = 1.0,
override_node_radius = 500.,
edge_radius = 250.,
show_outline = False,
dataset_identifier='',
opacity=1):
'''
Adds an annotation to a currently open mayavi plotting window
Parameters: anno: annotation object
node_scaling: float, scaling factor for node radius
edge_radius: float, radius of tubes for each edge
'''
# plot the nodes
# x, y, z are numpy arrays, s as well
if type(anno) == list:
nodes = []
for this_anno in anno:
nodes.extend(this_anno.getNodes())
color = anno[0].color
else:
nodes = list(anno.getNodes())
color = anno.color
coords = np.array([node.getCoordinate_scaled() for node in nodes])\
#* node_scaling
sc = np.hsplit(coords, 3)
# separate x, y and z; mlab needs that
#datasetDims = np.array(anno.datasetDims)
x = [el[0] for el in sc[0].tolist()]
y = [el[0] for el in sc[1].tolist()]
z = [el[0] for el in sc[2].tolist()]
if override_node_radius > 0.:
s = [override_node_radius]*len(nodes)
else:
s = [node.getDataElem('radius') for node in nodes]
s = np.array(s)
s = s * node_scaling
#s[0] = 5000
#extent=[1, 108810, 1, 106250, 1, 115220]
#raise
pts = mlab.points3d(x, y, z, s, color=color, scale_factor=1.0,
opacity=opacity)
# dict for faster lookup, nodes.index(node) adds O(n^2)
nodeIndexMapping = {}
for nodeIndex, node in enumerate(nodes):
nodeIndexMapping[node] = nodeIndex
edges = []
for node in nodes:
for child in node.getChildren():
try:
edges.append((nodeIndexMapping[node], nodeIndexMapping[child]))
except:
print('Phantom child node, annotation object inconsistent')
# plot the edges
pts.mlab_source.dataset.lines = np.array(edges)
pts.mlab_source.update()
tube = mlab.pipeline.tube(pts, tube_radius = edge_radius)
mlab.pipeline.surface(tube, color = anno.color)
if show_outline:
if dataset_identifier == 'j0126':
mlab.outline(extent=(0,108810,0,106250,0,115220), opacity=0.5,
line_width=5.)
elif dataset_identifier == 'j0251':
mlab.outline(extent=(0,270000,0,270000,0,387350), opacity=0.5,
line_width=5.)
elif dataset_identifier == 'j0256':
mlab.outline(extent=(0,166155,0,166155,0,77198), opacity=0.5,
line_width=1., color=(0.5,0.5,0.5))
else:
print('Please add a dataset identifier string')
return
def visualize_annotation(anno,
node_scaling = 1.0,
override_node_radius = 500.,
edge_radius = 250.0,
bg_color = (1.0, 1.0, 1.0),
dataset_identifier='',
show_outline=True,
figure_size_px = (600, 600)):
'''
Creates a new mayavi window and adds the annotation to it.
Make sure that the edge radius is half of the node radius to avoid ugly skeleton renderings.
'''
figure_size = figure_size_px
mlab.figure(None, bg_color,
fgcolor = (0.0, 0.0, 0.0),
                size=figure_size)
mlab.clf()
#if type(anno) == list:
# for cur_anno in anno:
# add_anno_to_mayavi_window(cur_anno, node_scaling, edge_radius)
#else:
add_anno_to_mayavi_window(anno,
node_scaling=node_scaling,
override_node_radius=override_node_radius,
edge_radius=edge_radius,
show_outline=show_outline,
dataset_identifier=dataset_identifier)
#mlab.view(49, 31.5, 52.8, (4.2, 37.3, 20.6))
#mlab.xlabel('x')
#mlab.ylabel('y')
#mlab.zlabel('z')
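# --- added usage sketch (not part of the original module) -------------------
# Hedged illustration of rendering a loaded annotation; how `anno` is obtained
# (e.g. via knossos_utils skeleton loading) is assumed and not shown here.
def _example_visualize(anno):
    visualize_annotation(anno,
                         override_node_radius=300.,
                         edge_radius=150.,
                         dataset_identifier='j0126')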
| gpl-2.0 |
fboers/jumeg | jumeg/decompose/ica.py | 1 | 24050 | # Authors: Lukas Breuer <[email protected]>
'''
Created on 27.11.2015
@author: lbreuer
'''
#######################################################
# #
# import necessary modules #
# #
#######################################################
from scipy.stats import kurtosis
import math
import numpy as np
#######################################################
# #
# interface to perform (extended) Infomax ICA on #
# a data array #
# #
#######################################################
def ica_array(data_orig, dim_reduction='',
explainedVar=1.0, overwrite=None,
max_pca_components=None, method='infomax',
cost_func='logcosh', weights=None, lrate=None,
block=None, wchange=1e-16, annealdeg=60.,
annealstep=0.9, n_subgauss=1, kurt_size=6000,
maxsteps=200, pca=None, verbose=True):
"""
interface to perform (extended) Infomax or FastICA on a data array
Parameters
----------
data_orig : array of data to be decomposed [nchan, ntsl].
dim_reduction : {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
Method for dimension selection. For further information about
the methods please check the script 'dimension_selection.py'.
default: dim_reduction='' --> no dimension reduction is performed
as long as not the parameter
'max_pca_components' is set.
explainedVar : float
Value between 0 and 1; components will be selected by the
cumulative percentage of explained variance.
overwrite : if set the data array will be overwritten
(this saves memory)
default: overwrite=None
max_pca_components : int | None
The number of components used for PCA decomposition. If None, no
dimension reduction will be applied and max_pca_components will equal
the number of channels supplied on decomposing data. Only of interest
when dim_reduction=''
method : {'fastica', 'infomax', 'extended-infomax'}
The ICA method to use. Defaults to 'infomax'.
FastICA parameter:
-----------------------------
cost_func : String
Cost function to use in FastICA algorithm. Could be
either 'logcosh', 'exp' or 'cube'.
(Extended) Infomax parameter:
-----------------------------
weights : initialize weights matrix
default: None --> identity matrix is used
lrate : initial learning rate (for most applications 1e-3 is
a good start)
             --> smaller learning rates will slow down the convergence;
             it merely indicates the relative size of the change in weights
             default: lrate = 0.01 / log(nchan ** 2.0)
    block : this block size used to randomly extract (in time) a chop
             of data
             default: block = floor(sqrt(ntsl / 3.0))
    wchange : iteration stops when weight changes are smaller than this
             number
             default: wchange = 1e-16
    annealdeg : if angle delta is larger than annealdeg (in degree) the
             learning rate will be reduced
             default: annealdeg = 60
    annealstep : the learning rate will be reduced by this factor:
             lrate *= annealstep
             default: annealstep = 0.9
    extended : if set, extended Infomax ICA is performed
             default: None
    n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
default: n_subgauss=1
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
default: kurt_size=6000
maxsteps : maximum number of iterations to be done
default: maxsteps = 200
Returns
-------
weights : un-mixing matrix
pca : instance of PCA
Returns the instance of PCA where all information about the
PCA decomposition are stored.
activations : underlying sources
"""
# -------------------------------------------
# check overwrite option
# -------------------------------------------
if overwrite == None:
data = data_orig.copy()
else:
data = data_orig
# -------------------------------------------
# perform centering and whitening of the data
# -------------------------------------------
if pca:
# perform centering and whitening
dmean = data.mean(axis=-1)
stddev = np.std(data, axis=-1)
dnorm = (data - dmean[:, np.newaxis])/stddev[:, np.newaxis]
data = np.dot(dnorm.T, pca.components_[:max_pca_components].T)
# update mean and standard-deviation in PCA object
pca.mean_ = dmean
pca.stddev_ = stddev
else:
if verbose:
print(" ... perform centering and whitening ...")
data, pca = whitening(data.T, dim_reduction=dim_reduction, npc=max_pca_components,
explainedVar=explainedVar)
# -------------------------------------------
# now call ICA algortithm
# -------------------------------------------
# FastICA
if method == 'fastica':
from sklearn.decomposition import fastica
_, unmixing_, sources_ = fastica(data, fun=cost_func, max_iter=maxsteps, tol=1e-4,
whiten=True)
activations = sources_.T
weights = unmixing_
# Infomax or extended Infomax
else:
if method == 'infomax':
extended = False
elif method == 'extended-infomax':
extended = True
else:
print(">>>> WARNING: Entered ICA method not found!")
print(">>>> Allowed are fastica, extended-infomax and infomax")
print(">>>> Using now the default ICA method which is Infomax")
extended = False
weights = infomax(data, weights=weights, l_rate=lrate, block=block,
w_change=wchange, anneal_deg=annealdeg, anneal_step=annealstep,
extended=extended, n_subgauss=n_subgauss, kurt_size=kurt_size,
max_iter=maxsteps, verbose=verbose)
activations = np.dot(weights, data.T)
# return results
return weights, pca, activations
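# --- added usage sketch (not part of the original module) -------------------
# Hedged illustration of calling ica_array on a toy data array [nchan, ntsl];
# the shapes, the explained-variance cutoff and the choice of extended Infomax
# are assumptions for demonstration only.
def _example_ica_array_usage():
    demo_data = np.random.randn(30, 10000)
    weights, pca, activations = ica_array(demo_data,
                                          dim_reduction='explVar',
                                          explainedVar=0.95,
                                          method='extended-infomax',
                                          verbose=False)
    return weights, pca, activations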
#######################################################
# #
# interface to perform (extended) Infomax ICA on #
# a data array #
# #
#######################################################
def infomax2data(weights, pca, activations, idx_zero=None):
"""
interface to perform (extended) Infomax ICA on a data array
Parameters
----------
weights : un-mixing matrix
pca : instance of PCA object
activations : underlying sources
idx_zero : indices of independent components (ICs) which
should be removed
default: idx_zero=None --> not IC is removed
Returns
-------
data : backtransformed cleaned data array
"""
# -------------------------------------------
# import necessary modules
# -------------------------------------------
from scipy.linalg import pinv
# -------------------------------------------
# check dimension of the input data
# -------------------------------------------
npc = len(weights)
nchan = len(pca.components_)
ntsl = activations.shape[1]
# create array for principal components
pc = np.zeros((nchan, ntsl))
# -------------------------------------------
# backtransform data
# -------------------------------------------
iweights = pinv(weights)
if idx_zero is not None:
iweights[:, idx_zero] = 0.
pc[:npc] = np.dot(iweights, activations) # back-transform to PCA-space
data = np.dot(pca.components_.T, pc) # back-transform to data-space
del pc # delete principal components
data = (data * pca.stddev_[:, np.newaxis]) + pca.mean_[:, np.newaxis] # reverse normalization
# return results
return data
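# --- added usage sketch (not part of the original module) -------------------
# Hedged illustration of back-transforming to a cleaned data array while
# zeroing two ICs; the indices in idx_zero are purely illustrative.
def _example_infomax2data_usage(weights, pca, activations):
    return infomax2data(weights, pca, activations, idx_zero=[0, 1])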
#######################################################
# #
# routine for PCA decomposition prior to ICA #
# #
#######################################################
def whitening(data, dim_reduction='',
npc=None, explainedVar=1.0):
"""
routine to perform whitening prior to Infomax ICA application
(whitening is based on Principal Component Analysis from the
RandomizedPCA package from sklearn.decomposition)
Parameters
----------
    data : data array [ntsl, nchan] for decomposition.
dim_reduction : {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
Method for dimension selection. For further information about
the methods please check the script 'dimension_selection.py'.
default: dim_reduction='' --> no dimension reduction is performed as
long as not the parameter 'npc' is set.
npc : int | None
The number of components used for PCA decomposition. If None, no
dimension reduction will be applied and max_pca_components will equal
the number of channels supplied on decomposing data. Only of interest
when dim_reduction=''
default: npc = None
explainedVar : float | None
Must be between 0 and 1. If float, the number of components
selected matches the number of components with a cumulative
explained variance of 'explainedVar'
default: explainedVar = None
Returns
-------
    whitened_data : data array [ntsl, n_components] of whitened principal
        components
    pca : instance of PCA
        Returns the instance of PCA where all information about the
        PCA decomposition (mean, standard deviation, components) is stored.
"""
# -------------------------------------------
# import necessary modules
# -------------------------------------------
from sklearn.decomposition import RandomizedPCA
from . import dimension_selection as dim_sel
# -------------------------------------------
# check input data
# -------------------------------------------
ntsl, nchan = data.shape
if (nchan < 2) or (ntsl < nchan):
raise ValueError('Data size too small!')
# -------------------------------------------
# perform PCA decomposition
# -------------------------------------------
X = data.copy()
whiten = False
dmean = X.mean(axis=0)
stddev = np.std(X, axis=0)
X = (X - dmean[np.newaxis, :]) / stddev[np.newaxis, :]
pca = RandomizedPCA(n_components=None, whiten=whiten,
copy=True)
# -------------------------------------------
# perform whitening
# -------------------------------------------
whitened_data = pca.fit_transform(X)
# -------------------------------------------
# update PCA structure
# -------------------------------------------
pca.mean_ = dmean
pca.stddev_ = stddev
# -------------------------------------------
# check dimension selection
# -------------------------------------------
if dim_reduction == 'AIC':
npc, _ = dim_sel.aic_mdl(pca.explained_variance_)
elif dim_reduction == 'BIC':
npc = dim_sel.mibs(pca.explained_variance_, ntsl,
use_bic=True)
elif dim_reduction == 'GAP':
npc = dim_sel.gap(pca.explained_variance_)
elif dim_reduction == 'MDL':
_, npc = dim_sel.aic_mdl(pca.explained_variance_)
elif dim_reduction == 'MIBS':
npc = dim_sel.mibs(pca.explained_variance_, ntsl,
use_bic=False)
elif dim_reduction == 'explVar':
# compute explained variance manually
        explained_variance_ratio_ = pca.explained_variance_.copy()  # copy to avoid modifying pca in place
explained_variance_ratio_ /= explained_variance_ratio_.sum()
npc = np.sum(explained_variance_ratio_.cumsum() <= explainedVar)
elif npc is None:
npc = nchan
# return results
return whitened_data[:, :(npc+1)], pca
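# --- added usage sketch (not part of the original module) -------------------
# Hedged illustration of the whitening step on random data [ntsl, nchan];
# the MDL criterion is just one of the supported dim_reduction options.
def _example_whitening_usage():
    toy = np.random.randn(5000, 20)
    whitened, pca = whitening(toy, dim_reduction='MDL')
    return whitened, pca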
#######################################################
# #
# real Infomax implementation #
# #
#######################################################
def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
anneal_deg=60., anneal_step=0.9, extended=False, n_subgauss=1,
kurt_size=6000, ext_blocks=1, max_iter=200,
fixed_random_state=None, verbose=None):
"""
Run the (extended) Infomax ICA decomposition on raw data
based on the publications of Bell & Sejnowski 1995 (Infomax)
and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The data to unmix.
    weights : np.ndarray, shape (n_features, n_features)
The initialized unmixing matrix. Defaults to None. If None, the
identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
        Note. Smaller learning rates will slow down the procedure.
        Defaults to 0.01 / log(n_features ** 2.0)
    block : int
        The block size of randomly chosen data segment.
        Defaults to floor(sqrt(n_times / 3.0))
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
The angle at which (in degree) the learning rate will be reduced.
Defaults to 60.0
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9
extended : bool
        Whether to use the extended infomax algorithm or not. Defaults to
        False.
n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
ext_blocks : int
The number of blocks after which to recompute Kurtosis.
Only considered for extended Infomax.
max_iter : int
The maximum number of iterations. Defaults to 200.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
The linear unmixing operator.
"""
# define some default parameter
max_weight = 1e8
restart_fac = 0.9
min_l_rate = 1e-10
blowup = 1e4
blowup_fac = 0.5
n_small_angle = 200
degconst = 180.0 / np.pi
# for extended Infomax
extmomentum = 0.5
signsbias = 0.02
signcount_threshold = 25
signcount_step = 2
if ext_blocks > 0: # allow not to recompute kurtosis
n_subgauss = 1 # but initialize n_subgauss to 1 if you recompute
# check data shape
n_samples, n_features = data.shape
n_features_square = n_features ** 2
# check input parameter
# heuristic default - may need adjustment for
# large or tiny data sets
if l_rate is None:
l_rate = 0.01 / math.log(n_features ** 2.0)
if block is None:
block = int(math.floor(math.sqrt(n_samples / 3.0)))
if verbose:
        print('computing%sInfomax ICA' % (' Extended ' if extended is True else ' '))
# collect parameter
nblock = n_samples // block
lastt = (nblock - 1) * block + 1
# initialize training
if weights is None:
# initialize weights as identity matrix
weights = np.identity(n_features, dtype=np.float64)
BI = block * np.identity(n_features, dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
onesrow = np.ones((1, block), dtype=np.float64)
startweights = weights.copy()
oldweights = startweights.copy()
step = 0
count_small_angle = 0
wts_blowup = False
blockno = 0
signcount = 0
# for extended Infomax
if extended is True:
signs = np.identity(n_features)
        # mark the first n_subgauss components as subgaussian (sign -1)
        signs.flat[slice(0, n_features * n_subgauss, n_features)] *= -1
kurt_size = min(kurt_size, n_samples)
old_kurt = np.zeros(n_features, dtype=np.float64)
oldsigns = np.zeros((n_features, n_features))
# trainings loop
olddelta, oldchange = 1., 0.
while step < max_iter:
# shuffle data at each step
if fixed_random_state:
np.random.seed(step) # --> permutation is fixed but differs at each step
else:
np.random.seed(None)
permute = list(range(n_samples))
np.random.shuffle(permute)
# ICA training block
# loop across block samples
for t in range(0, lastt, block):
u = np.dot(data[permute[t:t + block], :], weights)
u += np.dot(bias, onesrow).T
if extended is True:
# extended ICA update
y = np.tanh(u)
weights += l_rate * np.dot(weights,
BI - np.dot(np.dot(u.T, y), signs) -
np.dot(u.T, u))
bias += l_rate * np.reshape(np.sum(y, axis=0,
dtype=np.float64) * -2.0,
(n_features, 1))
else:
# logistic ICA weights update
y = 1.0 / (1.0 + np.exp(-u))
weights += l_rate * np.dot(weights,
BI + np.dot(u.T, (1.0 - 2.0 * y)))
bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
dtype=np.float64), (n_features, 1))
# check change limit
max_weight_val = np.max(np.abs(weights))
if max_weight_val > max_weight:
wts_blowup = True
blockno += 1
if wts_blowup:
break
# ICA kurtosis estimation
if extended is True:
n = np.fix(blockno / ext_blocks)
if np.abs(n) * ext_blocks == blockno:
if kurt_size < n_samples:
rp = np.floor(np.random.uniform(0, 1, kurt_size) *
(n_samples - 1))
tpartact = np.dot(data[rp.astype(int), :], weights).T
else:
tpartact = np.dot(data, weights).T
# estimate kurtosis
kurt = kurtosis(tpartact, axis=1, fisher=True)
if extmomentum != 0:
kurt = (extmomentum * old_kurt +
(1.0 - extmomentum) * kurt)
old_kurt = kurt
# estimate weighted signs
signs.flat[::n_features + 1] = ((kurt + signsbias) /
np.abs(kurt + signsbias))
ndiff = ((signs.flat[::n_features + 1] -
oldsigns.flat[::n_features + 1]) != 0).sum()
if ndiff == 0:
signcount += 1
else:
signcount = 0
oldsigns = signs
if signcount >= signcount_threshold:
ext_blocks = np.fix(ext_blocks * signcount_step)
signcount = 0
# here we continue after the for
# loop over the ICA training blocks
# if weights in bounds:
if not wts_blowup:
oldwtchange = weights - oldweights
step += 1
angledelta = 0.0
delta = oldwtchange.reshape(1, n_features_square)
change = np.sum(delta * delta, dtype=np.float64)
if verbose:
from sys import stdout
                info = "\r" if step > 1 else ""
info += ">>> Step %4d of %4d; wchange: %1.4e" % (step+1, max_iter, change)
stdout.write(info)
stdout.flush()
if step > 1:
angledelta = math.acos(np.sum(delta * olddelta) /
math.sqrt(change * oldchange))
angledelta *= degconst
# anneal learning rate
oldweights = weights.copy()
if angledelta > anneal_deg:
l_rate *= anneal_step # anneal learning rate
# accumulate angledelta until anneal_deg reached l_rates
olddelta = delta
oldchange = change
count_small_angle = 0 # reset count when angle delta is large
else:
if step == 1: # on first step only
olddelta = delta # initialize
oldchange = change
count_small_angle += 1
if count_small_angle > n_small_angle:
max_iter = step
# apply stopping rule
if step > 2 and change < w_change:
step = max_iter
elif change > blowup:
l_rate *= blowup_fac
# restart if weights blow up
# (for lowering l_rate)
else:
step = 0 # start again
wts_blowup = 0 # re-initialize variables
blockno = 1
l_rate *= restart_fac # with lower learning rate
weights = startweights.copy()
oldweights = startweights.copy()
olddelta = np.zeros((1, n_features_square), dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
# for extended Infomax
if extended:
signs = np.identity(n_features)
                    # mark the first n_subgauss components as subgaussian (sign -1)
                    signs.flat[slice(0, n_features * n_subgauss, n_features)] *= -1
oldsigns = np.zeros((n_features, n_features))
if l_rate > min_l_rate:
if verbose:
print('... lowering learning rate to %g \n... re-starting...' % l_rate)
else:
                raise ValueError('Error in Infomax ICA: unmixing matrix '
                                 'might not be invertible!')
# prepare return values
return weights.T
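# --- added usage sketch (not part of the original module) -------------------
# Hedged illustration of running infomax directly on pre-whitened data
# [n_samples, n_features] and unmixing it; the parameter values are
# assumptions chosen only for the example.
def _example_infomax_usage(whitened_data):
    unmixing = infomax(whitened_data, extended=True, max_iter=50,
                       verbose=False)
    return np.dot(unmixing, whitened_data.T)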
| bsd-3-clause |
stephen2run/EcoDataLearn | src/example/data_visualization.py | 1 | 2266 | import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
# visualize the important characteristics of the dataset
import matplotlib.pyplot as plt
# step 1: download the data
dataframe_all = pd.read_csv("https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv")
num_rows = dataframe_all.shape[0]
# step 2: remove useless data
# count the number of missing elements (NaN) in each column
counter_nan = dataframe_all.isnull().sum()
counter_without_nan = counter_nan[counter_nan==0]
# remove the columns with missing elements
dataframe_all = dataframe_all[counter_without_nan.keys()]
# remove the first 7 columns which contain no discriminative information
dataframe_all = dataframe_all.ix[:,7:]
# the list of columns (the last column is the class label)
columns = dataframe_all.columns
print(columns)
# step 3: get features (x) and scale the features
# get x and convert it to numpy array
x = dataframe_all.ix[:,:-1].values
standard_scaler = StandardScaler()
x_std = standard_scaler.fit_transform(x)
# step 4: get class labels y and then encode it into number
# get class label data
y = dataframe_all.ix[:,-1].values
# encode the class label
class_labels = np.unique(y)
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
# step 5: split the data into training set and test set
test_percentage = 0.1
x_train, x_test, y_train, y_test = train_test_split(x_std, y, test_size = test_percentage, random_state = 0)
# t-distributed Stochastic Neighbor Embedding (t-SNE) visualization
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
x_test_2d = tsne.fit_transform(x_test)
# scatter plot the sample points among 5 classes
markers=('s', 'd', 'o', '^', 'v')
color_map = {0:'red', 1:'blue', 2:'lightgreen', 3:'purple', 4:'cyan'}
plt.figure()
for idx, cl in enumerate(np.unique(y_test)):
plt.scatter(x=x_test_2d[y_test==cl,0], y=x_test_2d[y_test==cl,1], c=color_map[idx], marker=markers[idx], label=cl)
plt.xlabel('X in t-SNE')
plt.ylabel('Y in t-SNE')
plt.legend(loc='upper left')
plt.title('t-SNE visualization of test data')
plt.show()
| apache-2.0 |
dsquareindia/scikit-learn | examples/gaussian_process/plot_gpc_iris.py | 100 | 2269 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version for the iris-dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = .02 # step size in the mesh
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
# each point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y],
edgecolors=(0, 0, 0))
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
(titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))
plt.tight_layout()
plt.show()
| bsd-3-clause |
liyu1990/sklearn | sklearn/metrics/tests/test_score_objects.py | 17 | 14051 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/sparse/scipy_sparse.py | 18 | 5516 | """
Interaction with scipy.sparse matrices.
Currently only includes SparseSeries.to_coo helpers.
"""
from pandas.core.index import MultiIndex, Index
from pandas.core.series import Series
from pandas.compat import OrderedDict, lmap
def _check_is_partition(parts, whole):
whole = set(whole)
parts = [set(x) for x in parts]
if set.intersection(*parts) != set():
raise ValueError(
'Is not a partition because intersection is not null.')
if set.union(*parts) != whole:
raise ValueError('Is not a partition because union is not the whole.')
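# Illustrative example of the check above (added comment, not in the original
# module): _check_is_partition([[0], [1]], range(2)) passes silently, while
# _check_is_partition([[0], [0, 1]], range(2)) raises ValueError because the
# two parts overlap.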
def _to_ijv(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
""" For arbitrary (MultiIndexed) SparseSeries return
(v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for
passing to scipy.sparse.coo constructor. """
# index and column levels must be a partition of the index
_check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
# from the SparseSeries: get the labels and data for non-null entries
values = ss._data.internal_values()._valid_sp_values
nonnull_labels = ss.dropna()
def get_indexers(levels):
""" Return sparse coords and dense labels for subset levels """
# TODO: how to do this better? cleanly slice nonnull_labels given the
# coord
values_ilabels = [tuple(x[i] for i in levels)
for x in nonnull_labels.index]
if len(levels) == 1:
values_ilabels = [x[0] for x in values_ilabels]
# # performance issues with groupby ###################################
        # TODO: these two lines can replace the code below but
# groupby is too slow (in some cases at least)
# labels_to_i = ss.groupby(level=levels, sort=sort_labels).first()
# labels_to_i[:] = np.arange(labels_to_i.shape[0])
def _get_label_to_i_dict(labels, sort_labels=False):
""" Return OrderedDict of unique labels to number.
Optionally sort by label.
"""
labels = Index(lmap(tuple, labels)).unique().tolist() # squish
if sort_labels:
labels = sorted(list(labels))
d = OrderedDict((k, i) for i, k in enumerate(labels))
return (d)
def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
def robust_get_level_values(i):
# if index has labels (that are not None) use those,
# else use the level location
try:
return index.get_level_values(index.names[i])
except KeyError:
return index.get_level_values(i)
ilabels = list(zip(*[robust_get_level_values(i) for i in subset]))
labels_to_i = _get_label_to_i_dict(ilabels,
sort_labels=sort_labels)
labels_to_i = Series(labels_to_i)
if len(subset) > 1:
labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index)
labels_to_i.index.names = [index.names[i] for i in subset]
labels_to_i.name = 'value'
return (labels_to_i)
labels_to_i = _get_index_subset_to_coord_dict(ss.index, levels,
sort_labels=sort_labels)
# #####################################################################
# #####################################################################
i_coord = labels_to_i[values_ilabels].tolist()
i_labels = labels_to_i.index.tolist()
return i_coord, i_labels
i_coord, i_labels = get_indexers(row_levels)
j_coord, j_labels = get_indexers(column_levels)
return values, i_coord, j_coord, i_labels, j_labels
def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ),
sort_labels=False):
""" Convert a SparseSeries to a scipy.sparse.coo_matrix using index
levels row_levels, column_levels as the row and column
labels respectively. Returns the sparse_matrix, row and column labels.
"""
import scipy.sparse
if ss.index.nlevels < 2:
        raise ValueError('to_coo requires MultiIndex with nlevels >= 2')
if not ss.index.is_unique:
raise ValueError('Duplicate index entries are not allowed in to_coo '
'transformation.')
# to keep things simple, only rely on integer indexing (not labels)
row_levels = [ss.index._get_level_number(x) for x in row_levels]
column_levels = [ss.index._get_level_number(x) for x in column_levels]
v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels,
column_levels=column_levels,
sort_labels=sort_labels)
sparse_matrix = scipy.sparse.coo_matrix(
(v, (i, j)), shape=(len(rows), len(columns)))
return sparse_matrix, rows, columns
def _coo_to_sparse_series(A, dense_index=False):
""" Convert a scipy.sparse.coo_matrix to a SparseSeries.
Use the defaults given in the SparseSeries constructor.
"""
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
s = s.sort_index()
s = s.to_sparse() # TODO: specify kind?
if dense_index:
# is there a better constructor method to use here?
i = range(A.shape[0])
j = range(A.shape[1])
ind = MultiIndex.from_product([i, j])
s = s.reindex_axis(ind)
return s
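# The short sketch below is an illustrative usage example added for clarity
# (not part of the original module); it assumes the legacy SparseSeries API
# that these helpers were written against.
if __name__ == '__main__':
    # Build a small MultiIndexed SparseSeries and round-trip it through the
    # coo helpers defined above.
    arrays = [[1, 1, 2, 2], [10, 20, 10, 20]]
    ss = Series([3.0, float('nan'), 1.0, 2.0],
                index=MultiIndex.from_arrays(arrays)).to_sparse()
    A, rows, columns = _sparse_series_to_coo(ss, row_levels=(0,),
                                             column_levels=(1,),
                                             sort_labels=True)
    ss_back = _coo_to_sparse_series(A)
    print(A.todense())
    print(ss_back)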
| gpl-2.0 |
isomerase/RoboSkeeter | roboskeeter/math/segmented_correlation_analysis.py | 2 | 16511 | # -*- coding: utf-8 -*-
"""
Created on Wed May 20 17:18:13 2015
@author: richard
TODO meeting
manually plot
fix colors
fix data matrix
"""
import csv
import os
import time
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import acf
from statsmodels.tsa.stattools import pacf
from roboskeeter.io import i_o
INTERESTED_VALS = ['velo_x', 'velo_y', 'velo_z', 'curve']
#WINDOW_LEN = int(floor( len(df.index)/ 5 ))
WINDOW_LEN = 100
LAGS = 20
MIN_TRAJECTORY_LEN = 400
CONFINT_THRESH = 0.5
#
## testing parallelizing code
#def easy_parallize(f, sequence):
# # I didn't see gains with .dummy; you might
# from multiprocessing import Pool
# pool = Pool(processes=8)
# #from multiprocessing.dummy import Pool
# #pool = Pool(16)
#
# # f is given sequence. guaranteed to be in order
# result = pool.map(f, sequence)
# cleaned = [x for x in result if not x is None]
# cleaned = asarray(cleaned)
# # not optimal but safe
# pool.close()
# pool.join()
# return cleaned
# make list of all csvs in dir
def make_csv_name_list():
# TODO export this to io
dyn_traj_reldir = "data/dynamical_trajectories/"
print "Loading + filtering CSV files from ", dyn_traj_reldir
os.chdir(dyn_traj_reldir)
csv_list = sorted([os.path.splitext(file)[0] for file in glob("*.csv") if sum(1 for row in csv.reader(open(file))) > MIN_TRAJECTORY_LEN])
# csv_list = sorted([os.path.splitext(file)[0] for file in glob("*.csv")])
os.chdir(os.path.dirname(__file__))
return csv_list
def csvList2df(csv_list):
# TODO export this to io
print "Extracting csv data."
df_list = []
for csv_fname in csv_list:
df = i_o.load_single_csv_to_df(csv_fname)
df_vars = df[INTERESTED_VALS] # slice only cols we want
df_vars['log_curve'] = np.log(df_vars.loc[:,'curve'])
df_list.append(df_vars)
INTERESTED_VALS.append('log_curve')
return pd.concat(df_list)
def DF2analyzedSegments(DF):
print "Segmenting data, running ACF + PACF analysis."
# analyzed_segment_list = []
# super_data_list = []
# confint_lower_list = []
# confint_upper_list = []
filtered_data_list = []
t0 = time.time()
for csv_fname, DF in trajectory_DF.groupby(level=0):
# pool = mp.Pool(processes = 4)
# results = [pool.apply(segment_analysis, args=(csv_fname, DF)) for csv_fname, DF in DF_dict.iteritems()]
# analysis_df, super_data_matrix = segment_analysis(csv_fname, DF)
# super_data_panel, confint_lower, confint_upper = segment_analysis(csv_fname, DF)
filtered_panel = segment_analysis(csv_fname, DF)
# if analysis_df is None: # our df was too small to analyze
# pass
# else:
# analyzed_segment_list.append(analysis_df)
# super_data_list.append(super_data_panel)
# confint_lower_list.append(confint_lower)
# confint_upper_list.append(confint_upper)
filtered_data_list.append(filtered_panel)
t1 = time.time()
print "Segment analysis finished in %f seconds." % (t1-t0)
# return pd.concat(results)
# return pd.concat(analyzed_segment_list), super_data_matrix
# print super_data_list
return pd.concat(filtered_data_list, axis=1)
# return pd.concat(super_data_list, axis=1), pd.concat(confint_lower_list, axis=1), pd.concat(confint_upper_list, axis=1),
def segment_analysis(csv_fname, trajectory_df):
# catch small trajectory_dfs
if len(trajectory_df.index) < MIN_TRAJECTORY_LEN:
return None
else:
num_segments = len(trajectory_df.index) - WINDOW_LEN
# for each trajectory, loop through segments
# super_data = np.zeros((num_segments+1, LAGS+1+1, 2*len(INTERESTED_VALS)+1))
# super_data = np.zeros((2*len(INTERESTED_VALS), num_segments, LAGS+1))
# super_data_confint_upper = np.zeros((2*len(INTERESTED_VALS), num_segments, LAGS+1))
# super_data_confint_lower = np.zeros((2*len(INTERESTED_VALS), num_segments, LAGS+1))
confident_data = np.zeros((2*len(INTERESTED_VALS), num_segments, LAGS+1))
# segmentnames = np.ndarray.flatten( np.array([["{name:s} seg{index:0>3d}".format(name="C", index=segment_i)]*(LAGS+1) for segment_i in range(num_segments)]) )
for segment_i in range(num_segments):
# slice out segment from trajectory
segment = trajectory_df[segment_i:segment_i+WINDOW_LEN]
# data_matrix = np.zeros((2*len(INTERESTED_VALS), LAGS+1))
# confint_matrix = np.zeros((2*len(INTERESTED_VALS), LAGS+1))
## for segment, run PACF and ACF for each feature
# do analysis variable by variable
for var_name, var_values in segment.iteritems():
# make matrices
# make dictionary for column indices
var_index = segment.columns.get_loc(var_name)
# {'velo_x':0, 'velo_y':1, 'velo_z':2, 'curve':3, 'log_curve':4}[var_name]
# run ACF and PACF for the column
col_acf, acf_confint = acf(var_values, nlags=LAGS, alpha=.05)#, qstat= True)
# store data
# super_data[var_index, segment_i, :] = col_acf
# super_data_confint_lower[var_index, segment_i, :] = acf_confint[:,0]
# super_data_confint_upper[var_index, segment_i, :] = acf_confint[:,1]
# make confident data
acf_confint_distance = acf_confint[:,1] - acf_confint[:,0]
ACF_conf_booltable = acf_confint_distance[:] >= CONFINT_THRESH
filtered_data = col_acf
filtered_data[ACF_conf_booltable] = 0.
confident_data[var_index, segment_i, :] = filtered_data
## , acf_confint, acf_qstats, acf_pvals
col_pacf, pacf_confint = pacf(var_values, nlags=LAGS, method='ywmle', alpha=.05)
# TODO: check for PACF values above or below +-1
# super_data[var_index+len(INTERESTED_VALS), segment_i, :] = col_pacf
# super_data_confint_lower[var_index+len(INTERESTED_VALS), segment_i, :] = pacf_confint[:,0]
# super_data_confint_upper[var_index+len(INTERESTED_VALS), segment_i, :] = pacf_confint[:,1]
# make confident data
pacf_confint_distance = pacf_confint[:,1] - pacf_confint[:,0]
PACF_conf_booltable = pacf_confint_distance[:] >= CONFINT_THRESH
filtered_data = col_pacf # make a copy
filtered_data[PACF_conf_booltable] = 0.
confident_data[var_index+len(INTERESTED_VALS), segment_i, :] = filtered_data
# analysis panel
major_axis=[np.array([csv_fname]*num_segments), np.array(["{index:0>3d}".format(index=segment_i) for segment_i in range(num_segments)])]
# p = pd.Panel(super_data,
# items=['acf_velox', 'acf_veloy','acf_veloz', 'acf_curve', 'acf_logcurve', 'pacf_velox', 'pacf_veloy', 'pacf_veloz', 'pacf_curve', 'pacf_logcurve'],
## major_axis=np.array(["{name:s} seg{index:0>3d}".format(name=csv_fname, index=segment_i) for segment_i in range(num_segments)]),
# major_axis=major_axis,
# minor_axis=np.arange(LAGS+1))
# p.major_axis.names = ['Trajectory', 'segment_ID']
#
# # confint panel
# p_confint_upper = pd.Panel(super_data_confint_upper,
# items=['acf_velox', 'acf_veloy','acf_veloz', 'acf_curve', 'acf_logcurve', 'pacf_velox', 'pacf_veloy', 'pacf_veloz', 'pacf_curve', 'pacf_logcurve'],
## major_axis=np.array(["{name:s} seg{index:0>3d}".format(name=csv_fname, index=segment_i) for segment_i in range(num_segments)]),
# major_axis=major_axis,
# minor_axis=np.arange(LAGS+1))
# p_confint_upper.major_axis.names = ['Trajectory', 'segment_ID']
#
# p_confint_lower = pd.Panel(super_data_confint_lower,
# items=['acf_velox', 'acf_veloy','acf_veloz', 'acf_curve', 'acf_logcurve', 'pacf_velox', 'pacf_veloy', 'pacf_veloz', 'pacf_curve', 'pacf_logcurve'],
## major_axis=np.array(["{name:s} seg{index:0>3d}".format(name=csv_fname, index=segment_i) for segment_i in range(num_segments)]),
# major_axis=major_axis,
# minor_axis=np.arange(LAGS+1))
# p_confint_lower.major_axis.names = ['Trajectory', 'segment_ID']
# analysis panel
filtpanel = pd.Panel(confident_data,
items=['acf_velox', 'acf_veloy','acf_veloz', 'acf_curve', 'acf_logcurve', 'pacf_velox', 'pacf_veloy', 'pacf_veloz', 'pacf_curve', 'pacf_logcurve'],
# major_axis=np.array(["{name:s} seg{index:0>3d}".format(name=csv_fname, index=segment_i) for segment_i in range(num_segments)]),
major_axis=major_axis,
minor_axis=np.arange(LAGS+1))
filtpanel.major_axis.names = ['Trajectory', 'segment_ID']
return filtpanel
# return p, p_confint_upper, p_confint_lower, filtpanel
def plot_analysis(analysis_panel):#, confint_lower_panel, confint_upper_panel):
print "Plotting."
analysis_types = {'acf_velox': "ACF Velocity x", 'acf_veloy': "ACF Velocity y",\
'acf_veloz': "ACF Velocity Z", 'acf_curve': "ACF curvature", \
'acf_logcurve': "ACF log(curvature)",\
'pacf_velox': "PACF Velocity x", 'pacf_veloy': "PACF Velocity y", \
'pacf_veloz': "PACF Velocity z", 'pacf_curve': "PACF curvature", \
'pacf_logcurve': "PACF log(curvature)"}
type2raw = {'acf_velox': "velo_x", 'acf_veloy': "velo_y",\
'acf_veloz': "velo_z", 'acf_curve': "curve", \
'acf_logcurve': "log_curve",\
'pacf_velox': "velo_x", 'pacf_veloy': "velo_y", \
'pacf_veloz': "velo_z", 'pacf_curve': "curve", \
'pacf_logcurve': "log_curve"}
# print type(analysis_panel)
for analysis, title in analysis_types.iteritems():
DF = analysis_panel[analysis].sortlevel(0)
# DF_lower = confint_lower_panel[analysis].sortlevel(0)
# DF_upper = confint_upper_panel[analysis].sortlevel(0)
#TODO figure out DF.index.lexsort_depth error
for csv_fname, df in DF.groupby(level=0):
if not os.path.exists('./correlation_figs/{data_name}'.format(data_name = csv_fname)):
os.makedirs('./correlation_figs/{data_name}'.format(data_name = csv_fname))
# num segs in this csv
num_segs = df.shape[0] *1.0 # turn to floats
# select confint data
# df_lower = DF_lower.xs(csv_fname, level='Trajectory')
# df_upper = DF_upper.xs(csv_fname, level='Trajectory')
# fig = plt.figure()
# plt.title(csv_fname + " " + title)
# plt.ylabel("Correlation")
# plt.xlabel("Lags")
# plt.ylim([-1, 1])
#
# seg_iterator = df.iterrows()
#
# # plot flat
# color = iter(plt.cm.Set2(np.linspace(0,1,num_segs)))
# for index, seg in seg_iterator:
# c=next(color)
# sns.plt.plot(seg, color=c, alpha=0.6)
# plt.plot(range(21), np.zeros(21), color='lightgray')
# plt.savefig("./correlation_figs/{data_name}/{data_name} - 2D{label}.svg".format(label=analysis, data_name = csv_fname), format="svg")
# plot as a surface
surfacefig = plt.figure()
surfaceax = surfacefig.gca(projection='3d')
plt.title(csv_fname + " " + title)
x = np.arange(LAGS+1.0)
y = np.arange(num_segs)
XX, YY = np.meshgrid(x, y)
surf = surfaceax.plot_surface(XX, YY, df, shade=False,
facecolors=plt.cm.Set2((YY-YY.min()) / (YY.max()-YY.min())), cstride=1, rstride=5, alpha=0.7)
# add grey plane at corr=0
zeroplane = np.zeros_like(XX)
surfaceax.plot_surface(XX, YY, zeroplane, color='lightgray', linewidth=0, alpha=0.3)
# # plot upper conf int
# surfaceax.plot_surface(XX, YY, df_upper, color='r', alpha=0.1, linewidth=0)
# surfaceax.plot_surface(XX, YY, df_lower, color='r', alpha=0.1, linewidth=0)
surfaceax.set_xlabel("Lags")
surfaceax.set_ylabel("Segment Index")
surfaceax.set_zlabel("Correlation")
surfaceax.set_zlim(-1, 1)
plt.draw() # you need this to get the edge color
line = np.array(surf.get_edgecolor())
surf.set_edgecolor(line*np.array([0,0,0,0])+1) # make lines white, and keep alpha==1. It's an array of colors like this: [r,g,b,alpha]
plt.savefig("./correlation_figs/{data_name}/{data_name} - 3D{label}.svg".format(label=analysis, data_name = csv_fname), format="svg")
# # plot relevant raw data, colorized
variable = type2raw[analysis]
raw_data = trajectory_DF.xs(csv_fname, level='Trajectory')[variable].values
x = range(len(raw_data))
variable_trace = plt.figure()
ax1 = variable_trace.add_subplot(111) # regular resolution color map
#
#
cm = plt.get_cmap('Set2')
# first we substract WINDOWLEN from range so that we only color the starting
#points of each window. then we append black values to the end of
# the color cycle to make that part of the plots black
color_cycle = [cm(1.*i/(num_segs-1-WINDOW_LEN)) for i in range(int(num_segs)-1-WINDOW_LEN)]
color_cycle = color_cycle + [(0., 0., 0., 1.)]*WINDOW_LEN
ax1.set_color_cycle(color_cycle)
for i in range(int(num_segs)-1):
ax1.plot(x[i:i+2], raw_data[i:i+2])
plt.title(csv_fname + " " + variable)
plt.xlabel('Trajectory data timestep (ms)')
plt.ylabel('Value recorded (SI units)')
plt.savefig("./correlation_figs/{data_name}/{data_name} - raw {variable}.svg".format(data_name = csv_fname, variable = variable), format="svg")
# fig = plt.figure()
# conf_ints = [95, 68]
# sns.tsplot(df)
## sns.tsplot(df, time="Lags", unit="Segment", condition="Condition",\
## value=analysis, err_palette= palette,\
## err_style="unit_traces") # uncomment for unit traces
### err_style="ci_band", ci = conf_ints) # uncomment for CIs
# sns.plt.title(title)
# sns.plt.ylabel("Correlation")
# sns.plt.ylim([-1, 1])
# plt.savefig("./correlation_figs/{label}.svg".format(label=analysis), format="svg")
# sns.plt.show()
# if np.isnan(graph_matrix).any(): # we have nans
# print "Error! NaNs in matrix!"
# return graph_matrix
# sweet stuff ######
# df.mean(axis=0)
# df.var(axis=0)
# df.std(axis=0)
csv_list = make_csv_name_list()
##csv_list = ['Right Plume-39', 'Control-27']
##name = csv_list[0]
##csv_list = ['Right Plume-01', 'Right Plume-02', 'Right Plume-03', 'Right Plume-04', 'Right Plume-05']#, 'Right Plume-06', 'Right Plume-07']
trajectory_DF = csvList2df(csv_list)
#trajectory_DF = pd.concat(trajectory_DFs_dict.values())
#
##segment_analysis_DF, super_data = DF_dict2analyzedSegments(trajectory_DFs_dict)
#analysis_panel, confint_lower_panel, confint_upper_panel = DF2analyzedSegments(trajectory_DF)
filtered_panel = DF2analyzedSegments(trajectory_DF)
##
###plt.style.use('ggplot')
###graph_matrix = plot_analysis(segment_analysis_DF)
#plot_analysis(analysis_panel, confint_lower_panel, confint_upper_panel)
plot_analysis(filtered_panel)
| mit |
LaceyChen17/instacart-market-basket-analysis | transactions.py | 1 | 37705 | import gc
import os
import pickle
import numpy as np
import pandas as pd
from scipy.stats import entropy
from scipy.spatial.distance import euclidean
from constants import NUM_TOPIC
# from utils import is_organic, flatten_multiidx
import pdb
class TransLogConstructor:
def __init__(self, raw_data_dir, cache_dir):
self.raw_data_dir = raw_data_dir
self.cache_dir = cache_dir
def clear_cache(self):
for root, dirs, files in os.walk(self.raw_data_dir):
for name in files:
if name.endswith(".h5"):
os.remove(os.path.join(root, name))
print("Delete %s"%os.path.join(root, name))
print("Clear all cached h5!")
def get_orders(self):
'''
get order context information
'''
if os.path.exists(self.raw_data_dir + 'orders.h5'):
orders = pd.read_hdf(self.raw_data_dir + 'orders.h5')
else:
orders = pd.read_csv(self.raw_data_dir + 'orders.csv',
dtype = {'order_id': np.int32,
'user_id': np.int32,
'eval_set': 'category',
'order_number': np.int16,
'order_dow': np.int8,
'order_hour_of_day' : np.int8,
'days_since_prior_order': np.float32})
orders['days_since_prior_order'] = orders['days_since_prior_order'].fillna(0.0)
orders['days'] = orders.groupby(['user_id'])['days_since_prior_order'].cumsum()
orders['days_last'] = orders.groupby(['user_id'])['days'].transform(max)
orders['days_up_to_last'] = orders['days_last'] - orders['days']
del orders['days_last']
del orders['days']
orders.to_hdf(self.raw_data_dir + 'orders.h5', 'orders', mode = 'w', format = 'table')
return orders
def get_orders_items(self, prior_or_train):
'''
get detailed information of prior or train orders
'''
if os.path.exists(self.raw_data_dir + 'order_products__%s.h5'%prior_or_train):
order_products = pd.read_hdf(self.raw_data_dir + 'order_products__%s.h5'%prior_or_train)
else:
order_products = pd.read_csv(self.raw_data_dir + 'order_products__%s.csv'%prior_or_train,
dtype = {'order_id': np.int32,
'product_id': np.uint16,
'add_to_cart_order': np.int16,
'reordered': np.int8})
order_products.to_hdf(self.raw_data_dir + 'order_products__%s.h5'%prior_or_train, 'op', mode = 'w', format = 'table')
return order_products
def get_users_orders(self, prior_or_train, pad = 'product_id'):
'''
get users' detailed orders
oid, uid, pid, aid, did, reordered, days_since_prior_order, days_up_to_last,
hod, dow, pad[0]_purchase_times, pad[0]_purchase_interval
'''
if os.path.exists(self.raw_data_dir + 'user_orders_%s_%s.h5'%(prior_or_train, pad[:-3])):
user_orders = pd.read_hdf(self.raw_data_dir + 'user_orders_%s_%s.h5'%(prior_or_train, pad[:-3]))
else:
orders = self.get_orders()
del orders['eval_set']
order_items = self.get_orders_items(prior_or_train)
products = self.get_items('products')[['product_id', 'aisle_id', 'department_id']]
user_orders = pd.merge(order_items, orders, on = ['order_id'], how = 'left')
user_orders = pd.merge(user_orders, products, on = ['product_id'], how = 'left')
del order_items, products, orders
if prior_or_train == 'prior':
prefix = pad[0] + '_'
user_orders[prefix + 'purchase_times'] = (user_orders.sort_values(['user_id', pad, 'order_number'])
.groupby(['user_id', pad]).cumcount()+1)
user_orders[prefix + 'purchase_interval'] = (user_orders.sort_values(['user_id', pad, 'order_number'], ascending = False)
.groupby(['user_id', pad])['days_up_to_last'].diff())
user_orders[prefix + 'purchase_interval'] = user_orders[prefix + 'purchase_interval'].fillna(-1) # 1st time purchase
user_orders.to_hdf(self.raw_data_dir + 'user_orders_%s_%s.h5'%(prior_or_train, pad[:-3]), 'user_orders', mode = 'w')
return user_orders
def get_items(self, gran):
'''
get items' information
gran = [departments, aisles, products]
'''
items = pd.read_csv(self.raw_data_dir + '%s.csv'%gran)
return items
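    # Illustrative usage of the constructor above (added comment, not in the
    # original file; the directory paths are placeholders):
    #   ctor = TransLogConstructor('data/raw/', 'cache/')
    #   prior_log = ctor.get_users_orders('prior', pad='product_id')
    #   products = ctor.get_items('products')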
class TransLogExtractor(TransLogConstructor):
def __init__(self, raw_data_dir, cache_dir):
super().__init__(raw_data_dir, cache_dir)
def clear_cache(self, include_raw = False):
if include_raw:
super().clear_cache()
for root, dirs, files in os.walk(self.cache_dir):
for name in files:
if name.endswith("_feat.pkl") or name.endswith('_feat.h5'):
os.remove(os.path.join(root, name))
print("Delete %s"%os.path.join(root, name))
if name == 'train.h5' or name == 'test.h5':
os.remove(os.path.join(root, name))
print("Delete %s"%os.path.join(root, name))
print("Clear all cached !")
def cal_first_second(self, user_orders, pad, gcol):
prefix = pad[0] + '_'
is_user = 'u_' if gcol == 'user_id' else ''
first_purchase = (user_orders[user_orders[prefix + 'purchase_times'] == 1].groupby(gcol)[prefix + 'purchase_times']
.aggregate({is_user + prefix + 'first_times': 'count'}).reset_index())
second_purchase = (user_orders[user_orders[prefix + 'purchase_times'] == 2].groupby(gcol)[prefix + 'purchase_times']
.aggregate({is_user + prefix + 'second_times': 'count'}).reset_index())
first_second = pd.merge(first_purchase, second_purchase, on = gcol, how = 'left')
first_second[is_user + prefix + 'second_times'] = first_second[is_user + prefix + 'second_times'].fillna(0)
first_second[is_user + prefix + 'reorder_prob'] = first_second[is_user + prefix + 'second_times'] / first_second[is_user + prefix + 'first_times']
del user_orders
return first_second
def cal_dow_hod(self, user_orders, prefix, gcol):
dow = user_orders.groupby(gcol)['order_dow'].value_counts().unstack(fill_value = 0.0)
dow_entropy = dow.apply(lambda x: entropy(x.values, np.ones(len(x))), axis = 1).rename(prefix + 'dow_entropy').reset_index()
dow_most = dow.apply(lambda x: max(x.values), axis = 1).rename(prefix + 'dow_most').reset_index()
dow_argmost = dow.apply(lambda x: np.argmax(x.values), axis = 1).rename(prefix + 'dow_argmost').reset_index()
dow = dow_entropy.merge(dow_most, on = gcol, how = 'left')
dow = dow.merge(dow_argmost, on = gcol, how = 'left')
hod = user_orders.groupby(gcol)['order_hour_of_day'].value_counts().unstack(fill_value = 0.0)
hod_entropy = hod.apply(lambda x: entropy(x.values, np.ones(len(x))), axis = 1).rename(prefix + 'hod_entropy').reset_index()
hod_most = hod.apply(lambda x: max(x.values), axis = 1).rename(prefix + 'hod_most').reset_index()
hod_argmost = hod.apply(lambda x: np.argmax(x.values), axis = 1).rename(prefix + 'hod_argmost').reset_index()
hod = hod_entropy.merge(hod_most, on = gcol, how = 'left')
hod = hod.merge(hod_argmost, on = gcol, how = 'left')
dow_hod = dow.merge(hod, on = gcol, how = 'left')
del user_orders
return dow_hod
def cal_pad_agg(self, user_orders, prefix, pad, agg_col, agg_ops):
''' user feat'''
mid = pad[0] + '_'
suffix = agg_col[10:]
pad_agg = (user_orders.groupby(['user_id', pad])[agg_col].aggregate({agg_col: agg_ops}).reset_index()
.groupby(['user_id'])[agg_col].aggregate({
prefix + mid + 'avg' + suffix: 'mean',
prefix + mid + 'std' + suffix: 'std',
prefix + mid + 'min' + suffix: 'min',
prefix + mid + 'max' + suffix: 'max',
prefix + mid + 'med' + suffix: 'median'}).reset_index())
del user_orders
return pad_agg
def craft_label_none(self):
if os.path.exists(self.cache_dir + 'label_none.pkl'):
with open(self.cache_dir + 'label_none.pkl', 'rb') as f:
label_none = pickle.load(f)
else:
user_product = self.get_users_orders('train')
o_is_none = user_product.groupby(['order_id']).agg({'reordered':{'o_reordered_num':sum}})#.reset_index()
o_is_none.columns = o_is_none.columns.droplevel(0)
o_is_none.reset_index(inplace=True)
user_product = pd.merge(user_product,
o_is_none,
on = ['order_id'],
how = 'left')
user_product['label_none'] = user_product['o_reordered_num'].apply(lambda x : int(x == 0))
label_none = user_product[['user_id', 'order_id', 'label_none']].drop_duplicates()
with open(self.cache_dir + 'label_none.pkl', 'wb') as f:
pickle.dump(label_none, f, pickle.HIGHEST_PROTOCOL)
return label_none
def craft_label(self):
if os.path.exists(self.cache_dir + 'label.pkl'):
with open(self.cache_dir + 'label.pkl', 'rb') as f:
label = pickle.load(f)
else:
# orders = self.get_orders()
# order_products_train = self.get_orders_items('train')
# user_product = pd.merge(order_products_train, orders, on = ['order_id'], how = 'left')
user_product = self.get_users_orders('train')
label = user_product[user_product.reordered == 1][['user_id', 'product_id', 'reordered']]
label.columns = ['user_id', 'product_id', 'label']
with open(self.cache_dir + 'label.pkl', 'wb') as f:
pickle.dump(label, f, pickle.HIGHEST_PROTOCOL)
return label
def craft_context(self, train_or_test):
'''
train_or_test = ['train', 'test']
'''
if os.path.exists(self.cache_dir + 'context_feat_%s.pkl'%train_or_test):
with open(self.cache_dir + 'context_feat_%s.pkl'%train_or_test, 'rb') as f:
context_feat = pickle.load(f)
else:
orders = self.get_orders()
orders = orders[orders.eval_set == train_or_test]
context_feat = orders[['order_id', 'user_id', 'order_dow', 'order_hour_of_day', 'days_since_prior_order']]
context_feat.columns = ['order_id', 'user_id', 'ct_order_dow', 'ct_order_hour_of_day', 'ct_days_since_prior_order']
with open(self.cache_dir + 'context_feat_%s.pkl'%train_or_test, 'wb') as f:
pickle.dump(context_feat, f, pickle.HIGHEST_PROTOCOL)
return context_feat
def craft_feat_user(self):
''' all users feat'''
if os.path.exists(self.cache_dir + 'user_feat.h5'):
user_feat = pd.read_hdf(self.cache_dir + 'user_feat.h5')
else:
prefix = 'u_'
dfs = [self.get_users_orders('prior', 'product_id'),
self.get_users_orders('prior', 'aisle_id')[['order_id', 'a_purchase_times', 'a_purchase_interval']],
self.get_users_orders('prior', 'department_id')[['order_id', 'd_purchase_times', 'd_purchase_interval']]]
dfs =[df.set_index('order_id', drop=True)for df in dfs]
user_orders = pd.concat(dfs, axis=1, join='outer', copy=False)
user_orders.reset_index(drop=False, inplace=True)
del dfs
grouped = user_orders.groupby(['user_id']).agg({
'order_number' : {'u_total_orders' : max},
'reordered' : {'u_total_reorders' : sum,
'u_reorder_ratio':'mean'},
'product_id' : {'u_total_prods' : pd.Series.nunique},
'aisle_id':{prefix + 'total_aisles': pd.Series.nunique},
'department_id':{prefix + 'total_deps':pd.Series.nunique},
'days_up_to_last': {'u_active_first' : max,
'u_active_last': min},
'add_to_cart_order':{ 'u_min_add2cart_order': min,
'u_max_add2cart_order': max,
'u_avg_add2cart_order':'mean',
'u_std_add2cart_order':'std',
'u_med_add2cart_order':'median'}})#.reset_index()
grouped.columns = grouped.columns.droplevel(0)
grouped.reset_index(inplace = True)
# grouped = flatten_multiidx(grouped)
grouped['u_active_last_30'] = grouped['u_active_last'] % 30
grouped['u_active_last_21'] = grouped['u_active_last'] % 21
grouped['u_active_last_14'] = grouped['u_active_last'] % 14
grouped['u_active_last_7'] = grouped['u_active_last'] % 7
grouped['u_active_period'] = grouped['u_active_first'] - grouped['u_active_last']
grouped['u_avg_reorders'] = grouped['u_total_reorders'] / grouped['u_total_orders']
grouped['u_mean_interval'] = grouped['u_active_period'] / grouped['u_total_orders']
grouped['u_mean_basket'] = grouped['u_total_prods'] / grouped['u_total_orders']
# grouped['u_al_vs_mi'] = grouped['u_active_last'] / grouped['u_mean_interval']
for pad in ['product_id', 'aisle_id', 'department_id']:
agg_col = pad[0] + '_' + 'purchase_times' # p purchase_times, a_purchase_times, d_purchase_times
pad_agg = self.cal_pad_agg(user_orders, prefix, pad, agg_col, 'max')
grouped = grouped.merge(pad_agg, on = 'user_id', how = 'left')
del pad_agg
agg_col = pad[0] + '_' + 'purchase_interval'
pad_agg = self.cal_pad_agg(user_orders[(user_orders.p_purchase_interval != -1)], prefix, pad, agg_col, 'mean')
grouped = grouped.merge(pad_agg, on = 'user_id', how = 'left')
del pad_agg
dow_hod = self.cal_dow_hod(user_orders, prefix, 'user_id')
grouped = grouped.merge(dow_hod, on = ['user_id'], how = 'left')
del dow_hod
reorder_pnum = (user_orders[user_orders.reordered == 1]
.groupby(['user_id', 'order_id'])['product_id']
.agg({'reorder_pnum':'count'}).reset_index()
.groupby(['user_id'])['reorder_pnum']
.agg({'u_reorder_pnum_mean':'mean', 'u_reorder_pnum_std':'std'}).reset_index())
grouped =grouped.merge(reorder_pnum, on = ['user_id'], how = 'left')
del reorder_pnum
grouped = grouped.merge(self.cal_first_second(user_orders, 'product_id', 'user_id'), on = ['user_id'], how = 'left')
grouped = grouped.merge(self.cal_first_second(user_orders, 'aisle_id', 'user_id'), on = ['user_id'], how = 'left')
user_feat = grouped.merge(self.cal_first_second(user_orders, 'department_id', 'user_id'), on = ['user_id'], how = 'left')
del grouped, user_orders
na_cols = ['u_p_avg_interval', 'u_p_med_interval', 'u_p_min_interval', 'u_p_max_interval',
'u_a_avg_interval', 'u_a_med_interval', 'u_a_min_interval', 'u_a_max_interval',
'u_d_avg_interval', 'u_d_med_interval', 'u_d_min_interval', 'u_d_max_interval']
for col in na_cols:
user_feat[col] = user_feat[col].fillna(user_feat['u_mean_interval'])
na_cols = ['u_p_std_interval', 'u_a_std_interval', 'u_d_std_interval',
'u_p_std_times', 'u_a_std_times', 'u_d_std_times',
'u_reorder_pnum_std', 'u_reorder_pnum_mean']
user_feat[na_cols] = user_feat[na_cols].fillna(0)
user_feat.to_hdf(self.cache_dir + 'user_feat.h5', 'user', mode = 'w')
return user_feat
def craft_feat_item(self, pad):
'''
pad = [product_id, aisle_id, department_id]
'''
if os.path.exists(self.cache_dir + '%s_feat.h5'%pad[:-3]):
item_feat = pd.read_hdf(self.cache_dir + '%s_feat.h5'%pad[:-3])
else:
prefix = pad[0] + '_'
user_orders = self.get_users_orders('prior', pad)
grouped = user_orders.groupby(pad).agg(
{prefix + 'purchase_times':{prefix + 'max_times':max,
prefix + 'min_times':min},
'user_id':{prefix + 'num_purchsers': pd.Series.nunique},
'reordered':{prefix + 'reorder_sum':sum,
prefix + 'reorder_total':'count'},
'days_up_to_last':{prefix + 'days_to_last':min,
prefix + 'days_to_first':max},
'add_to_cart_order':{prefix + 'min_add2cart_order':min,
prefix + 'max_add2cart_order':max,
prefix + 'avg_add2cart_order':'mean',
prefix + 'std_add2cart_order':'std',
prefix + 'med_add2cart_order':'median'}})#.reset_index()
grouped.columns = grouped.columns.droplevel(0)
grouped.reset_index(inplace=True)
# grouped = flatten_multiidx(grouped)
grouped[prefix + 'std_add2cart_order'] = grouped[prefix + 'std_add2cart_order'].fillna(0)
grouped[prefix + 'active_period'] = grouped[prefix + 'days_to_first'] - grouped[prefix + 'days_to_last']
grouped[prefix + 'reorder_ratio'] = grouped[prefix + 'reorder_sum'] / grouped[prefix + 'reorder_total']
first_second = self.cal_first_second(user_orders, pad, pad)
grouped = grouped.merge(first_second, on = [pad], how = 'left')
del first_second
grouped[prefix + 'order_pp'] = grouped[prefix + 'reorder_total'] /grouped[prefix + 'first_times']
grouped[prefix + 'reorder_pp'] = grouped[prefix + 'reorder_sum'] / grouped[prefix + 'first_times']
dow_hod = self.cal_dow_hod(user_orders, prefix, pad)
grouped = grouped.merge(dow_hod, on = [pad], how = 'left')
del dow_hod
interval_feat = user_orders[user_orders[prefix + 'purchase_interval'] != -1].groupby([pad]).agg(
{prefix + 'purchase_interval':{prefix + 'mean_interval': 'mean',
prefix + 'median_interval': 'median',
prefix + 'std_interval': 'std',
prefix + 'min_interval': min,
prefix + 'max_interval': max}})#.reset_index()
interval_feat.columns = interval_feat.columns.droplevel(0)
interval_feat.reset_index(inplace=True)
# interval_feat = flatten_multiidx(interval_feat)
interval_feat[prefix + 'std_interval'] = interval_feat[prefix + 'std_interval'].fillna(0)
grouped = grouped.merge(interval_feat, on = [pad], how = 'left')
del interval_feat, user_orders
times = self.craft_feat_interact(pad)[[pad, 'u'+prefix+'order_num']]
times_feat = times.groupby(pad).agg(
{'u'+prefix+'order_num':{prefix + 'mean_times':'mean',
prefix + 'median_times':'median',
prefix + 'std_times':'std'}})# .reset_index()
del times
times_feat.columns = times_feat.columns.droplevel(0)
times_feat.reset_index(inplace=True)
# times_feat = flatten_multiidx(times_feat)
times_feat[prefix + 'std_times'] = times_feat[prefix + 'std_times'].fillna(0)
item_feat = grouped.merge(times_feat, on = [pad], how = 'left')
del times_feat, grouped
na_cols = [prefix + 'mean_interval', prefix + 'median_interval', prefix + 'min_interval', prefix + 'max_interval']
for col in na_cols:
item_feat[col] = item_feat[col].fillna(item_feat[prefix + 'days_to_last']) # only purchase once
item_feat[prefix + 'std_interval'] = item_feat[prefix + 'std_interval'].fillna(0)
item_feat.to_hdf(self.cache_dir + '%s_feat.h5'%pad[:-3], 'item', mode = 'w')
return item_feat
# def craft_feat_textual(self, item):
# '''
# TODO textual feat from item name
# word2vec
# '''
# if os.path.exists(self.cache_dir + 'textual_feat.pkl'):
# with open(self.cache_dir + 'textual_feat.pkl', 'rb') as f:
# textual_feat = pickle.load(f)
# else:
# item_info = self.get_items(item)
# item_info[item[0] + '_organic'] = item_info[item[:-1] + '_name'].apply(is_organic)
# textual_feat = item_info[[item[:-1] + '_id', item[0] + '_organic']]
# with open(self.cache_dir + 'textual_feat.pkl', 'wb') as f:
# pickle.dump(textual_feat, f, pickle.HIGHEST_PROTOCOL)
# return textual_feat
def craft_feat_pad(self):
'''
combine product, department, aisle
'''
if os.path.exists(self.cache_dir + 'pad_feat.h5'):
pad_feat = pd.read_hdf(self.cache_dir + 'pad_feat.h5')
else:
pad_feat = (self.craft_feat_item('product_id')
.merge(self.get_items('products')[['product_id', 'department_id', 'aisle_id']],
on = ['product_id'], how = 'left'))
pad_feat = pad_feat.merge(self.craft_feat_item('aisle_id'), on = ['aisle_id'], how = 'left')
pad_feat = pad_feat.merge(self.craft_feat_item('department_id'), on = ['department_id'], how = 'left')
# pad_feat = pad_feat.merge(self.craft_feat_textual('products'), on = ['product_id'], how = 'left')
pad_feat['p_a_market_share'] = pad_feat['p_reorder_total'] / pad_feat['a_reorder_total']
pad_feat['p_d_market_share'] = pad_feat['p_reorder_total'] / pad_feat['d_reorder_total']
pad_feat['a_d_market_share'] = pad_feat['a_reorder_total'] / pad_feat['d_reorder_total']
pad_feat['p_a_avg_add2cart'] = pad_feat['p_avg_add2cart_order'] / pad_feat['a_avg_add2cart_order']
pad_feat['p_d_avg_add2cart'] = pad_feat['p_avg_add2cart_order'] / pad_feat['d_avg_add2cart_order']
pad_feat['a_d_avg_add2cart'] = pad_feat['a_avg_add2cart_order'] / pad_feat['d_avg_add2cart_order']
pad_feat['p_a_max_times'] = pad_feat['p_max_times'] / pad_feat['a_max_times']
pad_feat['p_d_max_times'] = pad_feat['p_max_times'] / pad_feat['d_max_times']
pad_feat['a_d_max_times'] = pad_feat['a_max_times'] / pad_feat['d_max_times']
pad_feat['p_a_std_interval'] = pad_feat['p_std_interval'] / pad_feat['a_std_interval']
pad_feat['p_d_std_interval'] = pad_feat['p_std_interval'] / pad_feat['d_std_interval']
pad_feat['a_d_std_interval'] = pad_feat['a_std_interval'] / pad_feat['d_std_interval']
pad_feat.to_hdf(self.cache_dir + 'pad_feat.h5', 'pad', mode = 'w')
return pad_feat
def craft_feat_interact(self, pad):
'''
all users interact feat
pad = ['product_id', 'aisle_id', 'department_id']
'''
if os.path.exists(self.cache_dir + 'interact_feat_%s.h5'%pad[:-3]):
interact_feat = pd.read_hdf(self.cache_dir +'interact_feat_%s.h5'%pad[:-3])
else:
user_product = self.get_users_orders('prior', pad).sort_values(['user_id', 'order_number'])
prefix = 'u'+ pad[0] + '_'
prefix_without_u = pad[0] + '_'
grouped = user_product.groupby(['user_id', pad]).agg(
{'reordered':{prefix +'reorder_num':sum,
prefix + 'order_num':'count'},
'order_number':{prefix + 'first_order':min,
prefix + 'last_order':max},
'days_up_to_last':{prefix + 'days_to_last':min, # last purchase
prefix + 'days_to_first':max}, # first purchase
'add_to_cart_order':{prefix + 'min_add2cart_order':min,
prefix + 'max_add2cart_order':max,
prefix + 'avg_add2cart_order':'mean',
prefix + 'std_add2cart_order':'std',
prefix + 'med_add2cart_order':'median'}})#.reset_index()
grouped.columns = grouped.columns.droplevel(0)
grouped.reset_index(inplace=True)
# grouped = flatten_multiidx(grouped)
grouped[prefix + 'active_days'] = grouped[prefix + 'days_to_first'] - grouped[prefix + 'days_to_last']
grouped[prefix + 'std_add2cart_order'] = grouped[prefix + 'std_add2cart_order'].fillna(0)
grouped = pd.merge(grouped, self.craft_feat_user()[['user_id',
'u_total_orders',
'u_total_reorders',
'u_min_add2cart_order',
'u_max_add2cart_order',
'u_avg_add2cart_order',
'u_std_add2cart_order',
'u_med_add2cart_order']],
on = ['user_id'], how = 'left')
grouped[prefix + 'order_since_last'] = grouped['u_total_orders'] - grouped[prefix + 'last_order']
grouped[prefix + 'order_ratio_last'] = grouped[prefix + 'order_since_last'] / grouped['u_total_orders']
grouped[prefix + 'order_ratio'] = grouped[prefix + 'order_num'] / grouped['u_total_orders']
grouped[prefix + 'reorder_ratio'] = grouped[prefix + 'reorder_num'] / grouped['u_total_reorders']
grouped[prefix + 'order_ratio_first'] = grouped[prefix + 'order_num'] / (grouped['u_total_orders'] - grouped[prefix + 'first_order'] + 1)
grouped[prefix + 'min_add2cart_ratio'] = grouped[prefix + 'min_add2cart_order'] / grouped['u_min_add2cart_order']
grouped[prefix + 'max_add2cart_ratio'] = grouped[prefix + 'max_add2cart_order'] / grouped['u_max_add2cart_order']
grouped[prefix + 'med_add2cart_ratio'] = grouped[prefix + 'med_add2cart_order'] / grouped['u_med_add2cart_order']
grouped[prefix + 'avg_add2cart_ratio'] = grouped[prefix + 'avg_add2cart_order'] / grouped['u_avg_add2cart_order']
grouped[prefix + 'std_add2cart_ratio'] = grouped[prefix + 'std_add2cart_order'] / grouped['u_std_add2cart_order']
grouped[prefix + 'days_to_last_7'] = grouped[prefix + 'days_to_last'] % 7
grouped[prefix + 'days_to_last_14'] = grouped[prefix + 'days_to_last'] % 14
grouped[prefix + 'days_to_last_21'] = grouped[prefix + 'days_to_last'] % 21
grouped[prefix + 'days_to_last_30'] = grouped[prefix + 'days_to_last'] % 30
dow_hod = self.cal_dow_hod(user_product, prefix, ['user_id', pad])
grouped = grouped.merge(dow_hod, on = ['user_id', pad], how = 'left')
del dow_hod
user_product['last_order'] =user_product.groupby(['user_id', pad])['order_number'].transform(max)
last_order = user_product[user_product['last_order'] == user_product['order_number']][['user_id', pad, 'order_hour_of_day', 'order_dow', 'days_since_prior_order']].drop_duplicates()
last_order.columns = ['user_id', pad, prefix + 'last_hod', prefix + 'last_dow', prefix + 'last_days_since_prior']
grouped = grouped.merge(last_order, on = ['user_id', pad], how = 'left')
del last_order, user_product['last_order']
avg_interval = (user_product[user_product.reordered == 1].groupby(['user_id', pad])
['days_since_prior_order'].mean().reset_index()) # fillna with last purchase
avg_interval.columns = ['user_id', pad, prefix + 'avg_interval']
grouped = grouped.merge(avg_interval, on = ['user_id', pad], how = 'left')
del avg_interval
grouped[prefix + 'avg_interval_m'] = grouped[prefix + 'days_to_first'] - grouped[prefix + 'days_to_last'] / grouped[prefix + 'order_num']
interval_feat = (user_product[user_product[prefix_without_u + 'purchase_interval'] != -1].groupby(['user_id', pad]).agg({
prefix_without_u + 'purchase_interval':{prefix + 'median_interval': 'median',
prefix + 'std_interval': 'std',
prefix + 'min_interval': min,
prefix + 'max_interval': max}}))#.reset_index()
interval_feat.columns = interval_feat.columns.droplevel(0)
interval_feat.reset_index(inplace=True)
interval_feat[prefix + 'std_interval'] = interval_feat[prefix + 'std_interval'].fillna(0)
grouped = grouped.merge(interval_feat, on = ['user_id', pad], how = 'left')
del interval_feat
user_product['order_number_last'] = user_product.groupby('user_id')['order_number'].transform(max)
is_last_purchase = (user_product[user_product.order_number == user_product.order_number_last]
.groupby(['user_id', pad]).apply(lambda x:1).reset_index())
is_last_purchase.columns = [['user_id', pad, prefix + 'is_purchase_last']]
interact_feat = grouped.merge(is_last_purchase, on = ['user_id', pad], how = 'left')
del is_last_purchase
na_cols = [prefix + 'avg_interval', prefix + 'median_interval', prefix + 'min_interval', prefix + 'max_interval']
for col in na_cols:
interact_feat[col] = interact_feat[col].fillna(interact_feat[prefix + 'days_to_last']) # only purchase once
na_cols = [prefix + 'reorder_ratio', prefix + 'std_interval', prefix + 'is_purchase_last']
interact_feat[na_cols] = interact_feat[na_cols].fillna(0)
na_cols = [prefix + 'std_add2cart_ratio']
interact_feat[na_cols] = interact_feat[na_cols].fillna(1)
del interact_feat['u_total_orders']
del interact_feat['u_total_reorders']
del interact_feat['u_min_add2cart_order']
del interact_feat['u_max_add2cart_order']
del interact_feat['u_avg_add2cart_order']
del interact_feat['u_std_add2cart_order']
del interact_feat['u_med_add2cart_order']
interact_feat.to_hdf(self.cache_dir + 'interact_feat_%s.h5'%pad[:-3], 'interact', mode = 'w')
return interact_feat
def craft_user_topic(self, filepath = None):
'''
TODO
user_topic from lda model
'''
if filepath is None:
filepath = self.cache_dir + 'user_topic_%d.pkl'%NUM_TOPIC
else:
filepath = self.cache_dir + filepath
if os.path.exists(filepath):
with open(filepath, 'rb') as f:
user_topic = pickle.load(f)
else:
print(filepath)
pass
return user_topic
def craft_product_topic(self, filepath = None):
'''
TODO
user_topic from lda model
'''
if filepath is None:
filepath = self.cache_dir + 'topic_product_%d.pkl'%NUM_TOPIC
else:
filepath = self.cache_dir + filepath
if os.path.exists(filepath):
with open(filepath, 'rb') as f:
topic_product = pickle.load(f)
else:
print(filepath)
pass
return topic_product
def craft_up_distance(self, filepath = None, num_topic = NUM_TOPIC, pad = 'product_id'):
'''
calculate (u,p) pairs distance
using LDA embedded representation
'''
if isinstance(filepath, list):
p_filepath, u_filepath = filepath[0], filepath[1]
filepath = self.cache_dir + p_filepath[:6] + 'feat.pkl'
prefix = p_filepath[:6]
else:
p_filepath, u_filepath = None, None
filepath = self.cache_dir + 'upd_feat_%d.pkl'%num_topic
prefix = ''
if os.path.exists(filepath):
upd = pd.read_pickle(filepath)
else:
def cal_up_distance(subf):
u_topic = subf[[prefix + "u_topic_%d"%x for x in range(num_topic)]]
p_topic = subf[[prefix + "p_topic_%d"%x for x in range(num_topic)]]
upd = euclidean(u_topic, p_topic)
return upd
upd = pd.merge(self.get_users_orders('prior')[['user_id', pad]].drop_duplicates(),
self.craft_user_topic(u_filepath),
on = ['user_id'],
how = 'left')
upd.columns = ['user_id', pad] + [prefix + "u_topic_%d"%x for x in range(num_topic)]
upd = pd.merge(upd,
self.craft_product_topic(p_filepath),
on = [pad],
how = 'left')
upd.columns = ['user_id', pad] + [prefix + "u_topic_%d"%x for x in range(num_topic)] + [prefix + "p_topic_%d"%x for x in range(num_topic)]
for col in [prefix + "p_topic_%d"%x for x in range(num_topic)]:
upd[col] = upd[col].fillna(upd[col].mean())
upd[prefix + 'up_dis'] = upd.apply(cal_up_distance, axis = 1)
upd[prefix + 'up_dis'] = upd[prefix + 'up_dis'].fillna(upd[prefix + 'up_dis'].mean())
with open(filepath, 'wb') as f:
pickle.dump(upd, f, pickle.HIGHEST_PROTOCOL)
return upd
def craft_p_w2v(self):
filepath = self.cache_dir + 'p_w2v_feat.pkl'
p_w2v = pd.read_pickle(filepath)
return p_w2v
def craft_topic_pc(self):
''' compressed topic feat by PCA'''
filepath = self.cache_dir + 'up_topic_pc.h5'
up_topic_pc = pd.read_hdf(filepath)
return up_topic_pc
def craft_topic_dis(self):
filepath = self.cache_dir + 'up_topic_dis.h5'
up_topic_dis = pd.read_hdf(filepath)
return up_topic_dis
def craft_up_interval(self):
filepath = self.cache_dir + 'up_delta.pkl'
up_delta = pd.read_pickle(filepath)
return up_delta
def craft_dream_score(self):
filepath = self.cache_dir + 'dream_score.pkl'
dream_score = pd.read_pickle(filepath)
return dream_score
def craft_dream_score_next(self, is_reordered=False):
if is_reordered is True:
filepath = self.cache_dir + 'reorder_dream_score_next.pkl'
else:
filepath = self.cache_dir + 'dream_score_next.pkl'
dream_score = pd.read_pickle(filepath)
return dream_score
def craft_dream_final(self, is_reordered=False):
if is_reordered is True:
filepath = self.cache_dir + 'reorder_dream_final.pkl'
else:
filepath = self.cache_dir + 'dream_final.pkl'
dream_final = pd.read_pickle(filepath)
return dream_final
def craft_dream_dynamic_u(self, is_reordered=False):
if is_reordered is True:
filepath = self.cache_dir + 'reorder_dream_dynamic_u.pkl'
else:
filepath = self.cache_dir + 'dream_dynamic_u.pkl'
dream_dynamic_u = pd.read_pickle(filepath)
return dream_dynamic_u
def craft_dream_item_embed(self, is_reordered=False):
if is_reordered is True:
filepath = self.cache_dir + 'reorder_dream_item_embed.pkl'
else:
filepath = self.cache_dir + 'dream_item_embed.pkl'
dream_item_embed = pd.read_pickle(filepath)
return dream_item_embed
def craft_order_streak(self):
with open(self.cache_dir + 'up_order_streak.pkl', 'rb') as f:
up_order_streak = pickle.load(f)
return up_order_streak
| mit |
agadiraju/519finalproject | nearest_neighbor/import_train.py | 1 | 3923 | import csv
import sys
import matplotlib.pyplot as plt
import math
import numpy as np
import dateutil.parser as dateparser
from sklearn.feature_extraction import DictVectorizer
def rmsle(trainy, predicty):
metric = 0
n = len(trainy)
for idx in xrange(n):
y_train = trainy[idx] + .0001
y_predict = predicty[idx] + .0001
if y_predict <= 0:
y_predict = .0001
# print y_predict
# print y_train
# raw_input()
metric += ((math.log(y_predict) - math.log(y_train)) ** 2)
return math.sqrt(float(1.0/n) * metric)
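# Quick sanity note for rmsle above (added comment, not in the original
# script): rmsle([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]) returns 0.0, and the value
# grows as predictions drift multiplicatively away from the true counts.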
def plot_ride_heatmap(filename):
reader = csv.DictReader(open(filename, 'rU'), delimiter = ',')
ride_data = np.zeros(shape = (7, 24)) # days x hours
for row in reader:
hour = dateparser.parse(row['datetime']).hour
day = int(row['day']) - 1
total_rides = float(row['count'])
ride_data[day][hour] += total_rides
day_labels = ['Saturday', 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
plt.pcolor(ride_data,cmap=plt.cm.Blues,edgecolors='k')
plt.xticks(np.arange(0, 24))
plt.yticks(np.arange(0, 7) + 0.5, day_labels)
plt.xlabel('Hour')
plt.ylabel('Day of the Week')
plt.title('Heatmap of Bike Rides At Specific Hours')
plt.show()
def import_training_file(filename, discrete=False):
# data = genfromtxt(filename, delimiter=',', )
reader = csv.DictReader(open(filename, 'rU'), delimiter=',')
#reader.next()
data = []
factorize_fields = ['season', 'holiday', 'workingday', 'weather']
counter = 0
for row in reader:
current_feature_dict = {}
for f in reader.fieldnames:
# parse out month
if f == 'day':
continue
if f == 'datetime':
month = dateparser.parse(row[f]).month
hour = dateparser.parse(row[f]).hour
current_feature_dict['month'] = month
current_feature_dict['hour'] = hour
elif f not in factorize_fields:
#print row[f]
current_feature_dict[f] = float(row[f])
else:
current_feature_dict[f] = row[f]
data.append(current_feature_dict)
vec = DictVectorizer()
jumble = vec.fit_transform(data).toarray() # this messes up ordering....
feature_names = vec.get_feature_names()
# correct the ordering
correct_order = ['month', 'hour', 'Sat?', 'Sun?', 'Mon?', 'Tue?', 'Wed?', 'Thu?', 'Fri?', 'season=1',
'season=2', 'season=3', 'season=4', 'holiday=0', 'holiday=1', 'workingday=0',
'workingday=1', 'weather=1', 'weather=2', 'weather=3', 'weather=4', 'temp', 'atemp',
'humidity', 'windspeed', 'casual', 'registered', 'count']
data = []
for entry in jumble:
#current_feature = []
entry_dict = dict(zip(feature_names, entry))
current_feature = [entry_dict[k] for k in correct_order]
data.append(current_feature)
data = np.array(data)
orig_n, orig_d = data.shape
feature_matrix = np.zeros(shape=(orig_n, orig_d - 3))
total_matrix = np.zeros(shape=(orig_n, 1))
registered_matrix = np.zeros(shape=(orig_n, 1))
casual_matrix = np.zeros(shape=(orig_n, 1))
n,d = feature_matrix.shape
for idx, row in enumerate(data):
total = row[-1]
registered = row[-2]
casual = row[-3]
no_label = row[:-3] # remove label and corresponding counts
if discrete:
feature_matrix[idx] = np.rint(no_label)
else:
feature_matrix[idx] = no_label
total_matrix[idx] = total
registered_matrix[idx] = registered
casual_matrix[idx] = casual
idx = np.arange(n)
np.random.seed(42)
np.random.shuffle(idx)
feature_matrix = feature_matrix[idx]
total_matrix = total_matrix[idx]
registered_matrix = registered_matrix[idx]
casual_matrix = casual_matrix[idx]
return (feature_matrix, np.ravel(total_matrix), np.ravel(registered_matrix), np.ravel(casual_matrix))
if __name__ == '__main__':
#print import_training_file(sys.argv[1])
plot_ride_heatmap(sys.argv[1])
| mit |
bmanubay/open-forcefield-tools | single-molecule-property-generation/Mol2_files/convert_mol2_to_SMILES.py | 1 | 1354 | import glob
import pandas as pd
from openeye.oechem import *
from collections import OrderedDict
import pdb
molfiles = glob.glob('*.mol2')
AlkEthOH_ID = []
SMILES_strings = []
InChI_keys = []
Molecular_formula = []
for moldex,j in enumerate(molfiles):
#if moldex>10:
#pdb.set_trace()
mol = OEGraphMol()
ifs = oemolistream(j)
ofs = oemolistream(j)
flavorin = OEIFlavor_Generic_Default | OEIFlavor_MOL2_Default | OEIFlavor_MOL2_Forcefield
flavorout = OEOFlavor_INCHIKEY_Stereo
ifs.SetFlavor(OEFormat_MOL2, flavorin)
ofs.SetFlavor(OEFormat_INCHIKEY, flavorout)
OEReadMolecule(ifs, mol)
OETriposAtomNames(mol)
smi = OEMolToSmiles(mol)
InChIkey = OECreateInChIKey(mol)
form = OEMolecularFormula(mol)
AlkEthOH_ID.append(j.split('.')[0])
SMILES_strings.append(smi)
InChI_keys.append(InChIkey)
if "S" not in InChIkey.split('-')[1]:
pdb.set_trace()
Molecular_formula.append(form)
df = pd.DataFrame({
'InChI_keys':InChI_keys,
'SMILES_strings':SMILES_strings,
'Molecular_formulas':Molecular_formula,
'AlkEthOH_ID':AlkEthOH_ID
})
df = df[['InChI_keys','SMILES_strings','Molecular_formulas','AlkEthOH_ID']]
df.to_csv('AlkEthOH_data_set_identifiers.txt',sep='\t')
| mit |
CompPhysics/MachineLearning | doc/src/LectureNotes/_build/jupyter_execute/testbook/chapter5.py | 3 | 51527 | # Harmonic Oscillator
The harmonic oscillator is omnipresent in physics. Although you may think
of this as being related to springs, it, or an equivalent
mathematical representation, appears in just about any problem where a
mode is sitting near its potential energy minimum. At that point,
$\partial_x V(x)=0$, and the first non-zero term (aside from a
constant) in the potential energy is that of a harmonic oscillator. In
a solid, sound modes (phonons) are built on a picture of coupled
harmonic oscillators, and in relativistic field theory the fundamental
interactions are also built on coupled oscillators positioned
infinitesimally close to one another in space. The phenomena of a
resonance of an oscillator driven at a fixed frequency plays out
repeatedly in atomic, nuclear and high-energy physics, when quantum
mechanically the evolution of a state oscillates according to
$e^{-iEt}$ and exciting discrete quantum states has very similar
mathematics as exciting discrete states of an oscillator.
The potential energy for a single particle as a function of its position $x$ can be written as a Taylor expansion about some point $x_0$
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
V(x)=V(x_0)+(x-x_0)\left.\partial_xV(x)\right|_{x_0}+\frac{1}{2}(x-x_0)^2\left.\partial_x^2V(x)\right|_{x_0}
+\frac{1}{3!}\left.\partial_x^3V(x)\right|_{x_0}+\cdots
\label{_auto1} \tag{1}
\end{equation}
$$
If the position $x_0$ is at the minimum of the resonance, the first two non-zero terms of the potential are
$$
\begin{eqnarray}
V(x)&\approx& V(x_0)+\frac{1}{2}(x-x_0)^2\left.\partial_x^2V(x)\right|_{x_0},\\
\nonumber
&=&V(x_0)+\frac{1}{2}k(x-x_0)^2,~~~~k\equiv \left.\partial_x^2V(x)\right|_{x_0},\\
\nonumber
F&=&-\partial_xV(x)=-k(x-x_0).
\end{eqnarray}
$$
Put into Newton's 2nd law (assuming $x_0=0$),
$$
\begin{eqnarray}
m\ddot{x}&=&-kx,\\
x&=&A\cos(\omega_0 t-\phi),~~~\omega_0=\sqrt{k/m}.
\end{eqnarray}
$$
Here $A$ and $\phi$ are arbitrary. Equivalently, one could have
written this as $A\cos(\omega_0 t)+B\sin(\omega_0 t)$, or as the real
part of $Ae^{i\omega_0 t}$. In this last case $A$ could be an
arbitrary complex constant. Thus, there are 2 arbitrary constants
(either $A$ and $B$ or $A$ and $\phi$, or the real and imaginary part
of one complex constant. This is the expectation for a second order
differential equation, and also agrees with the physical expectation
that if you know a particle's initial velocity and position you should
be able to define its future motion, and that those two arbitrary
conditions should translate to two arbitrary constants.
A key feature of harmonic motion is that the system repeats itself
after a time $T=1/f$, where $f$ is the frequency, and $\omega=2\pi f$
is the angular frequency. The period of the motion is independent of
the amplitude. However, this independence is only exact when one can
neglect higher terms of the potential, $x^3, x^4\cdots$. One can
neglect these terms for sufficiently small amplitudes, and for larger
amplitudes the motion is no longer purely sinusoidal, and even though
the motion repeats itself, the time for repeating the motion is no
longer independent of the amplitude.
One can also calculate the velocity and the kinetic energy as a function of time,
$$
\begin{eqnarray}
\dot{x}&=&-\omega_0A\sin(\omega_0 t-\phi),\\
\nonumber
K&=&\frac{1}{2}m\dot{x}^2=\frac{m\omega_0^2A^2}{2}\sin^2(\omega_0t-\phi),\\
\nonumber
&=&\frac{k}{2}A^2\sin^2(\omega_0t-\phi).
\end{eqnarray}
$$
The total energy is then
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
E=K+V=\frac{1}{2}m\dot{x}^2+\frac{1}{2}kx^2=\frac{1}{2}kA^2.
\label{_auto2} \tag{2}
\end{equation}
$$
The total energy then goes as the square of the amplitude.
A pendulum is an example of a harmonic oscillator. By expanding the
kinetic and potential energies for small angles find the frequency for
a pendulum of length $L$ with all the mass $m$ centered at the end by
writing the eq.s of motion in the form of a harmonic oscillator.
The potential energy and kinetic energies are (for $x$ being the displacement)
$$
\begin{eqnarray*}
V&=&mgL(1-\cos\theta)\approx mgL\frac{x^2}{2L^2},\\
K&=&\frac{1}{2}mL^2\dot{\theta}^2\approx \frac{m}{2}\dot{x}^2.
\end{eqnarray*}
$$
For small $x$ Newton's 2nd law becomes
$$
m\ddot{x}=-\frac{mg}{L}x,
$$
and the spring constant would appear to be $k=mg/L$, which makes the
frequency equal to $\omega_0=\sqrt{g/L}$. Note that the frequency is
independent of the mass.
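As a quick numerical sanity check (a sketch of our own, not part of Taylor's text), the short code below integrates the full pendulum equation $\ddot{\theta}=-(g/L)\sin\theta$ with the Euler-Cromer scheme used later in this chapter and compares the measured period for a small initial angle with the small-angle prediction $T=2\pi\sqrt{L/g}$. The helper name PendulumPeriod and the parameter values are our own choices for this illustration.
import numpy as np
def PendulumPeriod(theta0, g=9.81, L=1.0, dt=1.0e-4, tmax=10.0):
    # integrate theta'' = -(g/L) sin(theta) with Euler-Cromer and estimate
    # the period from the spacing of successive upward zero crossings
    n = int(tmax/dt)
    theta, omega = theta0, 0.0
    crossings = []
    for i in range(n):
        omega += -dt*(g/L)*np.sin(theta)
        theta_new = theta + dt*omega
        if theta <= 0.0 < theta_new:
            crossings.append(i*dt)
        theta = theta_new
    return crossings[1]-crossings[0] if len(crossings) > 1 else None
# measured period for a 0.1 rad amplitude vs the small-angle result
print(PendulumPeriod(0.1), 2*np.pi*np.sqrt(1.0/9.81))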
## Damped Oscillators
We consider only the case where the damping force is proportional to
the velocity. This is counter to dragging friction, where the force is
proportional in strength to the normal force and independent of
velocity, and is also inconsistent with wind resistance, where the
magnitude of the drag force is proportional the square of the
velocity. Rolling resistance does seem to be mainly proportional to
the velocity. However, the main motivation for considering damping
forces proportional to the velocity is that the math is more
friendly. This is because the differential equation is linear,
i.e. each term is of order $x$, $\dot{x}$, $\ddot{x}\cdots$, or even
terms with no mention of $x$, and there are no terms such as $x^2$ or
$x\ddot{x}$. The equations of motion for a spring with damping force
$-b\dot{x}$ are
<!-- Equation labels as ordinary links -->
<div id="_auto3"></div>
$$
\begin{equation}
m\ddot{x}+b\dot{x}+kx=0.
\label{_auto3} \tag{3}
\end{equation}
$$
Just to make the solution a bit less messy, we rewrite this equation as
<!-- Equation labels as ordinary links -->
<div id="eq:dampeddiffyq"></div>
$$
\begin{equation}
\label{eq:dampeddiffyq} \tag{4}
\ddot{x}+2\beta\dot{x}+\omega_0^2x=0,~~~~\beta\equiv b/2m,~\omega_0\equiv\sqrt{k/m}.
\end{equation}
$$
Both $\beta$ and $\omega_0$ have dimensions of inverse time. To find solutions (see appendix C in the text) you must make an educated guess at the form of the solution. To do this, first realize that the solution will need an arbitrary normalization $A$ because the equation is linear. Secondly, realize that if the form is
<!-- Equation labels as ordinary links -->
<div id="_auto4"></div>
$$
\begin{equation}
x=Ae^{rt}
\label{_auto4} \tag{5}
\end{equation}
$$
that each derivative simply brings out an extra power of $r$. This
means that the $Ae^{rt}$ factors out and one can simply solve for an
equation for $r$. Plugging this form into Eq. ([4](#eq:dampeddiffyq)),
<!-- Equation labels as ordinary links -->
<div id="_auto5"></div>
$$
\begin{equation}
r^2+2\beta r+\omega_0^2=0.
\label{_auto5} \tag{6}
\end{equation}
$$
Because this is a quadratic equation there will be two solutions,
<!-- Equation labels as ordinary links -->
<div id="_auto6"></div>
$$
\begin{equation}
r=-\beta\pm\sqrt{\beta^2-\omega_0^2}.
\label{_auto6} \tag{7}
\end{equation}
$$
We refer to the two solutions as $r_1$ and $r_2$ corresponding to the
$+$ and $-$ roots. As expected, there should be two arbitrary
constants involved in the solution,
<!-- Equation labels as ordinary links -->
<div id="_auto7"></div>
$$
\begin{equation}
x=A_1e^{r_1t}+A_2e^{r_2t},
\label{_auto7} \tag{8}
\end{equation}
$$
where the coefficients $A_1$ and $A_2$ are determined by initial
conditions.
The square root in the roots listed above, $\sqrt{\beta^2-\omega_0^2}$, will be
imaginary if the damping is small and $\beta<\omega_0$. In that case,
$r$ is complex and the factor $e^{rt}$ will have some oscillatory
behavior. If the roots are real, there will only be exponentially
decaying solutions. There are three cases:
### Underdamped: $\beta<\omega_0$
$$
\begin{eqnarray}
x&=&A_1e^{-\beta t}e^{i\omega't}+A_2e^{-\beta t}e^{-i\omega't},~~\omega'\equiv\sqrt{\omega_0^2-\beta^2}\\
\nonumber
&=&(A_1+A_2)e^{-\beta t}\cos\omega't+i(A_1-A_2)e^{-\beta t}\sin\omega't.
\end{eqnarray}
$$
Here we have made use of the identity
$e^{i\omega't}=\cos\omega't+i\sin\omega't$. Because the constants are
arbitrary, and because the real and imaginary parts are both solutions
individually, we can simply consider the real part of the solution
alone:
<!-- Equation labels as ordinary links -->
<div id="eq:homogsolution"></div>
$$
\begin{eqnarray}
\label{eq:homogsolution} \tag{9}
x&=&B_1e^{-\beta t}\cos\omega't+B_2e^{-\beta t}\sin\omega't,\\
\nonumber
\omega'&\equiv&\sqrt{\omega_0^2-\beta^2}.
\end{eqnarray}
$$
### Critical damping: $\beta=\omega_0$
In this case the two terms involving $r_1$ and $r_2$ are identical
because $\omega'=0$. Because we need two arbitrary constants, there
needs to be another solution. This is found by simply guessing, or by
taking the limit of $\omega'\rightarrow 0$ from the underdamped
solution. The solution is then
<!-- Equation labels as ordinary links -->
<div id="eq:criticallydamped"></div>
$$
\begin{equation}
\label{eq:criticallydamped} \tag{10}
x=Ae^{-\beta t}+Bte^{-\beta t}.
\end{equation}
$$
The critically damped solution is interesting because the solution
approaches zero quickly, but does not oscillate. For a problem with
zero initial velocity, the solution never crosses zero. This is a good
choice for designing shock absorbers or swinging doors.
### Overdamped: $\beta>\omega_0$
$$
\begin{eqnarray}
x&=&A_1\exp{-(\beta+\sqrt{\beta^2-\omega_0^2})t}+A_2\exp{-(\beta-\sqrt{\beta^2-\omega_0^2})t}
\end{eqnarray}
$$
This solution will also never pass the origin more than once, and then
only if the initial velocity is strong and initially toward zero.
Given $b$, $m$ and $\omega_0$, find $x(t)$ for a particle whose
initial position is $x=0$ and has initial velocity $v_0$ (assuming an
underdamped solution).
The solution is of the form,
$$
\begin{eqnarray*}
x&=&e^{-\beta t}\left[A_1\cos(\omega' t)+A_2\sin\omega't\right],\\
\dot{x}&=&-\beta x+\omega'e^{-\beta t}\left[-A_1\sin\omega't+A_2\cos\omega't\right].\\
\omega'&\equiv&\sqrt{\omega_0^2-\beta^2},~~~\beta\equiv b/2m.
\end{eqnarray*}
$$
From the initial conditions, $A_1=0$ because $x(0)=0$ and $\omega'A_2=v_0$. So
$$
x=\frac{v_0}{\omega'}e^{-\beta t}\sin\omega't.
$$
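As a small illustration (our own sketch, with assumed values for $m$, $b$, $k$ and $v_0$), the code below evaluates this analytic solution and checks the initial velocity with a crude forward difference.
import numpy as np
import matplotlib.pyplot as plt
m, b, k, v0 = 1.0, 0.2, 1.0, 1.0      # assumed illustrative parameters
beta = b/(2*m)
omega0 = np.sqrt(k/m)
omegaprime = np.sqrt(omega0**2-beta**2)
t = np.linspace(0.0, 40.0, 2000)
x = v0/omegaprime*np.exp(-beta*t)*np.sin(omegaprime*t)
# the forward difference at t=0 should be close to v0
print((x[1]-x[0])/(t[1]-t[0]))
plt.plot(t, x)
plt.xlabel('t[s]')
plt.ylabel('x[m]')
plt.show()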
## Our Sliding Block Code
Here we study first the case without additional friction term and scale our equation
in terms of a dimensionless time $\tau$.
Let us remind ourselves about the differential equation we want to solve (the general case with damping due to friction)
$$
m\frac{d^2x}{dt^2} + b\frac{dx}{dt}+kx(t) =0.
$$
We divide by $m$, introduce $\omega_0=\sqrt{k/m}$, and obtain
$$
\frac{d^2x}{dt^2} + \frac{b}{m}\frac{dx}{dt}+\omega_0^2x(t) =0.
$$
Thereafter we introduce a dimensionless time $\tau = t\omega_0$ (check
that the dimensionality is correct) and rewrite our equation as
$$
\frac{d^2x}{d\tau^2} + \frac{b}{m\omega_0}\frac{dx}{d\tau}+x(\tau) =0.
$$
We then define $\gamma = b/(2m\omega_0)$ and rewrite our equations as
$$
\frac{d^2x}{d\tau^2} + 2\gamma\frac{dx}{d\tau}+x(\tau) =0.
$$
This is the equation we will code below. The first version employs the Euler-Cromer method.
%matplotlib inline
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
DeltaT = 0.001
#set up arrays
tfinal = 20 # final (dimensionless) time
n = ceil(tfinal/DeltaT)
# set up arrays for t, v, and x
t = np.zeros(n)
v = np.zeros(n)
x = np.zeros(n)
# Initial conditions as simple one-dimensional arrays of time
x0 = 1.0
v0 = 0.0
x[0] = x0
v[0] = v0
gamma = 0.0
# Start integrating using Euler-Cromer's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
a = -2*gamma*v[i]-x[i]
# update velocity, time and position
v[i+1] = v[i] + DeltaT*a
x[i+1] = x[i] + DeltaT*v[i+1]
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_ylabel('x[m]')
ax.set_xlabel('t[s]')
ax.plot(t, x)
fig.tight_layout()
save_fig("BlockEulerCromer")
plt.show()
When setting up the value of $\gamma$ we see that for $\gamma=0$ we get the simple oscillatory motion with no damping.
Choosing $\gamma < 1$ leads to the classical underdamped case with oscillatory motion, but where the motion comes to an end.
Choosing $\gamma =1$ leads to what normally is called critical damping and $\gamma> 1$ leads to overdamping.
Try it out and try also to change the initial position and velocity. Setting $\gamma=1$
yields a situation, as discussed above, where the solution approaches quickly zero and does not oscillate. With zero initial velocity it will never cross zero.
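To see the three regimes side by side, here is a small sketch (our own, reusing the Euler-Cromer loop above, wrapped in a helper function whose name EulerCromerBlock is just a local choice) that runs the same block for three values of $\gamma$.
import numpy as np
import matplotlib.pyplot as plt
def EulerCromerBlock(gamma, x0=1.0, v0=0.0, DeltaT=0.001, tfinal=20.0):
    n = int(np.ceil(tfinal/DeltaT))
    t = np.zeros(n); v = np.zeros(n); x = np.zeros(n)
    x[0], v[0] = x0, v0
    for i in range(n-1):
        a = -2*gamma*v[i]-x[i]
        v[i+1] = v[i] + DeltaT*a
        x[i+1] = x[i] + DeltaT*v[i+1]
        t[i+1] = t[i] + DeltaT
    return t, x
for gamma in [0.2, 1.0, 2.0]:   # under-, critically and overdamped
    t, x = EulerCromerBlock(gamma)
    plt.plot(t, x, label=r'$\gamma=%.1f$' % gamma)
plt.xlabel(r'$\tau$')
plt.ylabel('x')
plt.legend()
plt.show()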
## Sinusoidally Driven Oscillators
Here, we consider the force
<!-- Equation labels as ordinary links -->
<div id="_auto8"></div>
$$
\begin{equation}
F=-kx-b\dot{x}+F_0\cos\omega t,
\label{_auto8} \tag{11}
\end{equation}
$$
which leads to the differential equation
<!-- Equation labels as ordinary links -->
<div id="eq:drivenosc"></div>
$$
\begin{equation}
\label{eq:drivenosc} \tag{12}
\ddot{x}+2\beta\dot{x}+\omega_0^2x=(F_0/m)\cos\omega t.
\end{equation}
$$
Consider a single solution with no arbitrary constants, which we will
call a {\it particular solution}, $x_p(t)$. It should be emphasized
that this is {\bf A} particular solution, because there exists an
infinite number of such solutions because the general solution should
have two arbitrary constants. Now consider solutions to the same
equation without the driving term, which include two arbitrary
constants. These are called either {\it homogenous solutions} or {\it
complementary solutions}, and were given in the previous section,
e.g. Eq. ([9](#eq:homogsolution)) for the underdamped case. The
homogenous solution already incorporates the two arbitrary constants,
so any sum of a homogenous solution and a particular solution will
represent the {\it general solution} of the equation. The general
solution incorporates the two arbitrary constants $A$ and $B$ to
accommodate the two initial conditions. One could have picked a
different particular solution, i.e. the original particular solution
plus any homogenous solution with the arbitrary constants $A_p$ and
$B_p$ chosen at will. When one adds in the homogenous solution, which
has adjustable constants with arbitrary constants $A'$ and $B'$, to
the new particular solution, one can get the same general solution by
simply adjusting the new constants such that $A'+A_p=A$ and
$B'+B_p=B$. Thus, the choice of $A_p$ and $B_p$ are irrelevant, and
when choosing the particular solution it is best to make the simplest
choice possible.
To find a particular solution, one first guesses at the form,
<!-- Equation labels as ordinary links -->
<div id="eq:partform"></div>
$$
\begin{equation}
\label{eq:partform} \tag{13}
x_p(t)=D\cos(\omega t-\delta),
\end{equation}
$$
and rewrite the differential equation as
<!-- Equation labels as ordinary links -->
<div id="_auto9"></div>
$$
\begin{equation}
D\left\{-\omega^2\cos(\omega t-\delta)-2\beta\omega\sin(\omega t-\delta)+\omega_0^2\cos(\omega t-\delta)\right\}=\frac{F_0}{m}\cos(\omega t).
\label{_auto9} \tag{14}
\end{equation}
$$
One can now use angle addition formulas to get
$$
\begin{eqnarray}
D\left\{(-\omega^2\cos\delta+2\beta\omega\sin\delta+\omega_0^2\cos\delta)\cos(\omega t)\right.&&\\
\nonumber
\left.+(-\omega^2\sin\delta-2\beta\omega\cos\delta+\omega_0^2\sin\delta)\sin(\omega t)\right\}
&=&\frac{F_0}{m}\cos(\omega t).
\end{eqnarray}
$$
Both the $\cos$ and $\sin$ terms need to equate if the expression is to hold at all times. Thus, this becomes two equations
$$
\begin{eqnarray}
D\left\{-\omega^2\cos\delta+2\beta\omega\sin\delta+\omega_0^2\cos\delta\right\}&=&\frac{F_0}{m}\\
\nonumber
-\omega^2\sin\delta-2\beta\omega\cos\delta+\omega_0^2\sin\delta&=&0.
\end{eqnarray}
$$
After dividing by $\cos\delta$, the lower expression leads to
<!-- Equation labels as ordinary links -->
<div id="_auto10"></div>
$$
\begin{equation}
\tan\delta=\frac{2\beta\omega}{\omega_0^2-\omega^2}.
\label{_auto10} \tag{15}
\end{equation}
$$
Using the identities $\tan^2\delta+1=\sec^2\delta$ and $\sin^2\delta+\cos^2\delta=1$, one can also express $\sin\delta$ and $\cos\delta$,
$$
\begin{eqnarray}
\sin\delta&=&\frac{2\beta\omega}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}},\\
\nonumber
\cos\delta&=&\frac{(\omega_0^2-\omega^2)}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}}
\end{eqnarray}
$$
Inserting the expressions for $\cos\delta$ and $\sin\delta$ into the expression for $D$,
<!-- Equation labels as ordinary links -->
<div id="eq:Ddrive"></div>
$$
\begin{equation}
\label{eq:Ddrive} \tag{16}
D=\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}}.
\end{equation}
$$
For a given initial condition, e.g. initial displacement and velocity,
one must add the homogenous solution then solve for the two arbitrary
constants. However, because the homogenous solutions decay with time
as $e^{-\beta t}$, the particular solution is all that remains at
large times, and is therefore the steady state solution. Because the
arbitrary constants are all in the homogenous solution, all memory of
the initial conditions is lost at large times, $t>>1/\beta$.
The amplitude of the motion, $D$, is linearly proportional to the
driving force ($F_0/m$), but also depends on the driving frequency
$\omega$. For small $\beta$ the maximum will occur at
$\omega=\omega_0$. This is referred to as a resonance. In the limit
$\beta\rightarrow 0$ the amplitude at resonance approaches infinity.
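A short sketch (our own, with assumed values for $F_0/m$ and $\omega_0$) plots the amplitude $D$ as a function of the driving frequency for a few damping strengths and shows how the resonance peak sharpens and grows as $\beta$ decreases.
import numpy as np
import matplotlib.pyplot as plt
F0_over_m, omega0 = 1.0, 1.0          # assumed values
omega = np.linspace(0.1, 2.0, 500)
for beta in [0.05, 0.1, 0.3]:
    D = F0_over_m/np.sqrt((omega0**2-omega**2)**2+4*omega**2*beta**2)
    plt.plot(omega, D, label=r'$\beta=%.2f$' % beta)
plt.xlabel(r'$\omega$')
plt.ylabel('D')
plt.legend()
plt.show()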
## Alternative Derivation for Driven Oscillators
Here, we derive the same expressions as in Equations ([13](#eq:partform)) and ([16](#eq:Ddrive)) but express the driving forces as
$$
\begin{eqnarray}
F(t)&=&F_0e^{i\omega t},
\end{eqnarray}
$$
rather than as $F_0\cos\omega t$. The real part of $F$ is the same as before. For the differential equation,
<!-- Equation labels as ordinary links -->
<div id="eq:compdrive"></div>
$$
\begin{eqnarray}
\label{eq:compdrive} \tag{17}
\ddot{x}+2\beta\dot{x}+\omega_0^2x&=&\frac{F_0}{m}e^{i\omega t},
\end{eqnarray}
$$
one can treat $x(t)$ as an imaginary function. Because the operations
$d^2/dt^2$ and $d/dt$ are real and thus do not mix the real and
imaginary parts of $x(t)$, Eq. ([17](#eq:compdrive)) is effectively 2
equations. Because $e^{i\omega t}=\cos\omega t+i\sin\omega t$, the real
part of the solution for $x(t)$ gives the solution for a driving force
$F_0\cos\omega t$, and the imaginary part of $x$ corresponds to the
case where the driving force is $F_0\sin\omega t$. It is rather easy
to solve for the complex $x$ in this case, and by taking the real part
of the solution, one finds the answer for the $\cos\omega t$ driving
force.
We assume a simple form for the particular solution
<!-- Equation labels as ordinary links -->
<div id="_auto11"></div>
$$
\begin{equation}
x_p=De^{i\omega t},
\label{_auto11} \tag{18}
\end{equation}
$$
where $D$ is a complex constant.
From Eq. ([17](#eq:compdrive)) one inserts the form for $x_p$ above to get
$$
\begin{eqnarray}
D\left\{-\omega^2+2i\beta\omega+\omega_0^2\right\}e^{i\omega t}=(F_0/m)e^{i\omega t},\\
\nonumber
D=\frac{F_0/m}{(\omega_0^2-\omega^2)+2i\beta\omega}.
\end{eqnarray}
$$
The norm and phase for $D=|D|e^{-i\delta}$ can be read by inspection,
<!-- Equation labels as ordinary links -->
<div id="_auto12"></div>
$$
\begin{equation}
|D|=\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}},~~~~\tan\delta=\frac{2\beta\omega}{\omega_0^2-\omega^2}.
\label{_auto12} \tag{19}
\end{equation}
$$
This is the same expression for $\delta$ as before. One then finds $x_p(t)$,
<!-- Equation labels as ordinary links -->
<div id="eq:fastdriven1"></div>
$$
\begin{eqnarray}
\label{eq:fastdriven1} \tag{20}
x_p(t)&=&\Re\frac{(F_0/m)e^{i\omega t-i\delta}}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}\\
\nonumber
&=&\frac{(F_0/m)\cos(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}.
\end{eqnarray}
$$
This is the same answer as before.
If one wished to solve for the case where $F(t)= F_0\sin\omega t$, the imaginary part of the solution would work
<!-- Equation labels as ordinary links -->
<div id="eq:fastdriven2"></div>
$$
\begin{eqnarray}
\label{eq:fastdriven2} \tag{21}
x_p(t)&=&\Im\frac{(F_0/m)e^{i\omega t-i\delta}}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}\\
\nonumber
&=&\frac{(F_0/m)\sin(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}.
\end{eqnarray}
$$
Consider the damped and driven harmonic oscillator worked out above. Given $F_0, m,\beta$ and $\omega_0$, solve for the complete solution $x(t)$ for the case where $F=F_0\sin\omega t$ with initial conditions $x(t=0)=0$ and $v(t=0)=0$. Assume the underdamped case.
The general solution including the arbitrary constants includes both the homogenous and particular solutions,
$$
\begin{eqnarray*}
x(t)&=&\frac{F_0}{m}\frac{\sin(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}
+A\cos\omega't e^{-\beta t}+B\sin\omega't e^{-\beta t}.
\end{eqnarray*}
$$
The quantities $\delta$ and $\omega'$ are given earlier in the
section, $\omega'=\sqrt{\omega_0^2-\beta^2},
\delta=\tan^{-1}(2\beta\omega/(\omega_0^2-\omega^2))$. Here, solving
the problem means finding the arbitrary constants $A$ and
$B$. Satisfying the initial conditions for the initial position and
velocity:
$$
\begin{eqnarray*}
x(t=0)=0&=&-\eta\sin\delta+A,\\
v(t=0)=0&=&\omega\eta\cos\delta-\beta A+\omega'B,\\
\eta&\equiv&\frac{F_0}{m}\frac{1}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}.
\end{eqnarray*}
$$
The problem is now reduced to 2 equations and 2 unknowns, $A$ and $B$. The solution is
$$
\begin{eqnarray}
A&=& \eta\sin\delta ,~~~B=\frac{-\omega\eta\cos\delta+\beta\eta\sin\delta}{\omega'}.
\end{eqnarray}
$$
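Below is a minimal sketch (our own, with assumed numbers for $F_0$, $m$, $\beta$, $\omega_0$ and $\omega$) that builds the full solution from these expressions and shows how the homogenous part dies out, leaving only the steady-state particular solution.
import numpy as np
import matplotlib.pyplot as plt
F0, m, beta, omega0, omega = 1.0, 1.0, 0.1, 1.0, 0.7   # assumed values
omegaprime = np.sqrt(omega0**2-beta**2)
delta = np.arctan2(2*beta*omega, omega0**2-omega**2)
eta = (F0/m)/np.sqrt((omega0**2-omega**2)**2+4*beta**2*omega**2)
A = eta*np.sin(delta)
B = (-omega*eta*np.cos(delta)+beta*eta*np.sin(delta))/omegaprime
t = np.linspace(0.0, 60.0, 3000)
particular = eta*np.sin(omega*t-delta)
homogenous = np.exp(-beta*t)*(A*np.cos(omegaprime*t)+B*np.sin(omegaprime*t))
plt.plot(t, particular+homogenous, label='full solution')
plt.plot(t, particular, '--', label='steady state')
plt.xlabel('t[s]')
plt.ylabel('x[m]')
plt.legend()
plt.show()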
## Resonance Widths; the $Q$ factor
From the previous two sections, the particular solution for a driving force, $F=F_0\cos\omega t$, is
$$
\begin{eqnarray}
x_p(t)&=&\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}}\cos(\omega t-\delta),\\
\nonumber
\delta&=&\tan^{-1}\left(\frac{2\beta\omega}{\omega_0^2-\omega^2}\right).
\end{eqnarray}
$$
If one fixes the driving frequency $\omega$ and adjusts the
fundamental frequency $\omega_0=\sqrt{k/m}$, the maximum amplitude
occurs when $\omega_0=\omega$ because that is when the term from the
denominator $(\omega_0^2-\omega^2)^2+4\omega^2\beta^2$ is at a
minimum. This is akin to dialing into a radio station. However, if one
fixes $\omega_0$ and adjusts the driving frequency, one minimizes with
respect to $\omega$, i.e. one sets
<!-- Equation labels as ordinary links -->
<div id="_auto13"></div>
$$
\begin{equation}
\frac{d}{d\omega}\left[(\omega_0^2-\omega^2)^2+4\omega^2\beta^2\right]=0,
\label{_auto13} \tag{22}
\end{equation}
$$
and one finds that the maximum amplitude occurs when
$\omega=\sqrt{\omega_0^2-2\beta^2}$. If $\beta$ is small relative to
$\omega_0$, one can simply state that the maximum amplitude is
<!-- Equation labels as ordinary links -->
<div id="_auto14"></div>
$$
\begin{equation}
x_{\rm max}\approx\frac{F_0}{2m\beta \omega_0}.
\label{_auto14} \tag{23}
\end{equation}
$$
The width of the resonance is usually characterized by its full width at half maximum (FWHM), the range of driving frequencies over which the amplitude squared is at least half of its peak value. This condition can be written as
$$
\begin{eqnarray}
\frac{4\omega^2\beta^2}{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}=\frac{1}{2}.
\end{eqnarray}
$$
For small damping this occurs when $\omega=\omega_0\pm \beta$, so the $FWHM\approx 2\beta$. For the purposes of tuning to a specific frequency, one wants the width to be as small as possible. The ratio of $\omega_0$ to $FWHM$ is known as the {\it quality} factor, or $Q$ factor,
<!-- Equation labels as ordinary links -->
<div id="_auto15"></div>
$$
\begin{equation}
Q\equiv \frac{\omega_0}{2\beta}.
\label{_auto15} \tag{24}
\end{equation}
$$
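As a rough numerical check (our own sketch, with assumed $\beta$ and $\omega_0$), the code below locates the frequencies where the amplitude squared drops to half its maximum and compares the resulting width with $2\beta$ and the quality factor with $\omega_0/(2\beta)$.
import numpy as np
omega0, beta, F0_over_m = 1.0, 0.05, 1.0   # assumed values
omega = np.linspace(0.5, 1.5, 200001)
D2 = F0_over_m**2/((omega0**2-omega**2)**2+4*omega**2*beta**2)
half = D2 >= 0.5*D2.max()
fwhm = omega[half][-1]-omega[half][0]
print(fwhm, 2*beta)                    # width of the resonance vs 2*beta
print(omega0/fwhm, omega0/(2*beta))    # measured Q vs omega0/(2*beta)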
## Numerical Studies of Driven Oscillations
Solving the problem of driven oscillations numerically gives us much
more flexibility to study different types of driving forces. We can
reuse our earlier code by simply adding a driving force. If we stay in
the $x$-direction only this can be easily done by adding a term
$F_{\mathrm{ext}}(x,t)$. Note that we have kept it rather general
here, allowing for both a spatial and a temporal dependence.
Before we dive into the code, we need to briefly remind ourselves
about the equations we started with for the case with damping, namely
$$
m\frac{d^2x}{dt^2} + b\frac{dx}{dt}+kx(t) =0,
$$
with no external force applied to the system.
Let us now for simplicty assume that our external force is given by
$$
F_{\mathrm{ext}}(t) = F_0\cos{(\omega t)},
$$
where $F_0$ is a constant (what is its dimension?) and $\omega$ is the frequency of the applied external driving force.
**Small question:** would you expect energy to be conserved now?
Introducing the external force into our lovely differential equation
and dividing by $m$ and introducing $\omega_0=\sqrt{k/m}$ we have
$$
\frac{d^2x}{dt^2} + \frac{b}{m}\frac{dx}{dt}+\omega_0^2x(t) =\frac{F_0}{m}\cos{(\omega t)},
$$
Thereafter we introduce a dimensionless time $\tau = t\omega_0$
and a dimensionless frequency $\tilde{\omega}=\omega/\omega_0$. We have then
$$
\frac{d^2x}{d\tau^2} + \frac{b}{m\omega_0}\frac{dx}{d\tau}+x(\tau) =\frac{F_0}{m\omega_0^2}\cos{(\tilde{\omega}\tau)},
$$
Introducing a new amplitude $\tilde{F} =F_0/(m\omega_0^2)$ (check dimensionality again) we have
$$
\frac{d^2x}{d\tau^2} + \frac{b}{m\omega_0}\frac{dx}{d\tau}+x(\tau) =\tilde{F}\cos{(\tilde{\omega}\tau)}.
$$
Our final step, as we did in the case of various types of damping, is
to define $\gamma = b/(2m\omega_0)$ and rewrite our equations as
$$
\frac{d^2x}{d\tau^2} + 2\gamma\frac{dx}{d\tau}+x(\tau) =\tilde{F}\cos{(\tilde{\omega}\tau)}.
$$
This is the equation we will code below using the Euler-Cromer method.
DeltaT = 0.001
#set up arrays
tfinal = 20 # final (dimensionless) time
n = ceil(tfinal/DeltaT)
# set up arrays for t, v, and x
t = np.zeros(n)
v = np.zeros(n)
x = np.zeros(n)
# Initial conditions as one-dimensional arrays of time
x0 = 1.0
v0 = 0.0
x[0] = x0
v[0] = v0
gamma = 0.2
Omegatilde = 0.5
Ftilde = 1.0
# Start integrating using Euler-Cromer's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
a = -2*gamma*v[i]-x[i]+Ftilde*cos(t[i]*Omegatilde)
# update velocity, time and position
v[i+1] = v[i] + DeltaT*a
x[i+1] = x[i] + DeltaT*v[i+1]
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_ylabel('x[m]')
ax.set_xlabel('t[s]')
ax.plot(t, x)
fig.tight_layout()
save_fig("ForcedBlockEulerCromer")
plt.show()
In the above example we have focused on the Euler-Cromer method. This
method has a local truncation error which is proportional to $\Delta t^2$
and thereby a global error which is proportional to $\Delta t$.
We can improve this by using the Runge-Kutta family of
methods. The widely popular Runge-Kutta to fourth order or just **RK4**
has indeed a much better truncation error. The RK4 method has a global
error which is proportional to $\Delta t^4$.
Let us revisit this method and see how we can implement it for the above example.
## Differential Equations, Runge-Kutta methods
Runge-Kutta (RK) methods are based on Taylor expansion formulae, but yield
in general better algorithms for solutions of an ordinary differential equation.
The basic philosophy is that it provides an intermediate step in the computation of $y_{i+1}$.
To see this, consider first the following definitions
<!-- Equation labels as ordinary links -->
<div id="_auto16"></div>
$$
\begin{equation}
\frac{dy}{dt}=f(t,y),
\label{_auto16} \tag{25}
\end{equation}
$$
and
<!-- Equation labels as ordinary links -->
<div id="_auto17"></div>
$$
\begin{equation}
y(t)=\int f(t,y) dt,
\label{_auto17} \tag{26}
\end{equation}
$$
and
<!-- Equation labels as ordinary links -->
<div id="_auto18"></div>
$$
\begin{equation}
y_{i+1}=y_i+ \int_{t_i}^{t_{i+1}} f(t,y) dt.
\label{_auto18} \tag{27}
\end{equation}
$$
To demonstrate the philosophy behind RK methods, let us consider
the second-order RK method, RK2.
The first approximation consists in Taylor expanding $f(t,y)$
around the center of the integration interval $t_i$ to $t_{i+1}$,
that is, at $t_i+h/2$, $h$ being the step.
Using the midpoint formula for an integral,
defining $y(t_i+h/2) = y_{i+1/2}$ and
$t_i+h/2 = t_{i+1/2}$, we obtain
<!-- Equation labels as ordinary links -->
<div id="_auto19"></div>
$$
\begin{equation}
\int_{t_i}^{t_{i+1}} f(t,y) dt \approx hf(t_{i+1/2},y_{i+1/2}) +O(h^3).
\label{_auto19} \tag{28}
\end{equation}
$$
This means in turn that we have
<!-- Equation labels as ordinary links -->
<div id="_auto20"></div>
$$
\begin{equation}
y_{i+1}=y_i + hf(t_{i+1/2},y_{i+1/2}) +O(h^3).
\label{_auto20} \tag{29}
\end{equation}
$$
However, we do not know the value of $y_{i+1/2}$. Here the next approximation enters: we use Euler's
method to approximate $y_{i+1/2}$. We have then
<!-- Equation labels as ordinary links -->
<div id="_auto21"></div>
$$
\begin{equation}
y_{(i+1/2)}=y_i + \frac{h}{2}\frac{dy}{dt}=y(t_i) + \frac{h}{2}f(t_i,y_i).
\label{_auto21} \tag{30}
\end{equation}
$$
This means that we can define the following algorithm for
the second-order Runge-Kutta method, RK2.
<!-- Equation labels as ordinary links -->
<div id="_auto22"></div>
$$
\begin{equation}
k_1=hf(t_i,y_i),
\label{_auto22} \tag{31}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto23"></div>
$$
\begin{equation}
k_2=hf(t_{i+1/2},y_i+k_1/2),
\label{_auto23} \tag{32}
\end{equation}
$$
with the final value
<!-- Equation labels as ordinary links -->
<div id="_auto24"></div>
$$
\begin{equation}
y_{i+i}\approx y_i + k_2 +O(h^3).
\label{_auto24} \tag{33}
\end{equation}
$$
The difference from the previous one-step methods
is that we now need an intermediate step in our evaluation,
namely at $t_i+h/2 = t_{(i+1/2)}$ where we evaluate the derivative $f$.
This involves more operations, but the gain is a better stability
in the solution.
The fourth-order Runge-Kutta, RK4, has the following algorithm
$$
k_1=hf(t_i,y_i)\hspace{0.5cm} k_2=hf(t_i+h/2,y_i+k_1/2)
$$
$$
k_3=hf(t_i+h/2,y_i+k_2/2)\hspace{0.5cm} k_4=hf(t_i+h,y_i+k_3)
$$
with the final result
$$
y_{i+1}=y_i +\frac{1}{6}\left( k_1 +2k_2+2k_3+k_4\right).
$$
Thus, the algorithm consists in first calculating $k_1$
with $t_i$, $y_i$ and $f$ as inputs. Thereafter, we advance the time
by $h/2$ and calculate $k_2$ and then $k_3$, and finally advance by a full step $h$ to calculate $k_4$. The global error goes as $O(h^4)$.
However, at this stage, if we keep adding different methods in our
main program, the code will quickly become messy and ugly. Before we
proceed, we will now introduce functions that embody the various
methods for solving differential equations. This means that we can
separate out these methods into their own functions and files (and later as classes and more
generic functions) and simply call them when needed. Similarly, we
could easily encapsulate various forces or other quantities of
interest in terms of functions. To see this, let us bring up the code
we developed above for the simple sliding block, but now only with the simple forward Euler method. We introduce
two functions, one for the simple Euler method and one for the
force.
Note that here the forward Euler method does not know the specific force function to be called;
it simply receives the force function as an input argument. We can easily change the force by passing in another function.
def ForwardEuler(v,x,t,n,Force):
for i in range(n-1):
v[i+1] = v[i] + DeltaT*Force(v[i],x[i],t[i])
x[i+1] = x[i] + DeltaT*v[i]
t[i+1] = t[i] + DeltaT
def SpringForce(v,x,t):
# note here that we have divided by mass and we return the acceleration
return -2*gamma*v-x+Ftilde*cos(t*Omegatilde)
It is easy to add a new method like the Euler-Cromer
def ForwardEulerCromer(v,x,t,n,Force):
for i in range(n-1):
a = Force(v[i],x[i],t[i])
v[i+1] = v[i] + DeltaT*a
x[i+1] = x[i] + DeltaT*v[i+1]
t[i+1] = t[i] + DeltaT
and the Velocity Verlet method (be careful with the time dependence here; it is not an ideal method for non-conservative forces)
def VelocityVerlet(v,x,t,n,Force):
for i in range(n-1):
a = Force(v[i],x[i],t[i])
        x[i+1] = x[i] + DeltaT*v[i]+0.5*DeltaT*DeltaT*a
anew = Force(v[i],x[i+1],t[i+1])
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
Finally, we can now add the Runge-Kutta2 method via a new function
def RK2(v,x,t,n,Force):
for i in range(n-1):
# Setting up k1
k1x = DeltaT*v[i]
k1v = DeltaT*Force(v[i],x[i],t[i])
# Setting up k2
vv = v[i]+k1v*0.5
xx = x[i]+k1x*0.5
k2x = DeltaT*vv
k2v = DeltaT*Force(vv,xx,t[i]+DeltaT*0.5)
# Final result
x[i+1] = x[i]+k2x
v[i+1] = v[i]+k2v
t[i+1] = t[i]+DeltaT
Similarly, we can now add the Runge-Kutta4 method via a new function
def RK4(v,x,t,n,Force):
for i in range(n-1):
# Setting up k1
k1x = DeltaT*v[i]
k1v = DeltaT*Force(v[i],x[i],t[i])
# Setting up k2
vv = v[i]+k1v*0.5
xx = x[i]+k1x*0.5
k2x = DeltaT*vv
k2v = DeltaT*Force(vv,xx,t[i]+DeltaT*0.5)
# Setting up k3
vv = v[i]+k2v*0.5
xx = x[i]+k2x*0.5
k3x = DeltaT*vv
k3v = DeltaT*Force(vv,xx,t[i]+DeltaT*0.5)
# Setting up k4
vv = v[i]+k3v
xx = x[i]+k3x
k4x = DeltaT*vv
k4v = DeltaT*Force(vv,xx,t[i]+DeltaT)
# Final result
x[i+1] = x[i]+(k1x+2*k2x+2*k3x+k4x)/6.
v[i+1] = v[i]+(k1v+2*k2v+2*k3v+k4v)/6.
t[i+1] = t[i] + DeltaT
The Runge-Kutta family of methods are particularly useful when we have a time-dependent acceleration.
If we have forces which depend only on the spatial degrees of freedom (no velocity and/or time dependence), then energy-conserving methods like the Velocity Verlet or the Euler-Cromer method are preferred. As soon as we introduce an explicit time dependence and/or add dissipative forces like friction or air resistance, methods from the Runge-Kutta family are well suited.
The code below uses the Runge-Kutta4 methods.
DeltaT = 0.001
#set up arrays
tfinal = 20 # final (dimensionless) time
n = ceil(tfinal/DeltaT)
# set up arrays for t, v, and x
t = np.zeros(n)
v = np.zeros(n)
x = np.zeros(n)
# Initial conditions (can change to more than one dim)
x0 = 1.0
v0 = 0.0
x[0] = x0
v[0] = v0
gamma = 0.2
Omegatilde = 0.5
Ftilde = 1.0
# Start integrating using the RK4 method
# Note that we define the force function as a SpringForce
RK4(v,x,t,n,SpringForce)
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_ylabel('x[m]')
ax.set_xlabel('t[s]')
ax.plot(t, x)
fig.tight_layout()
save_fig("ForcedBlockRK4")
plt.show()
## Principle of Superposition and Periodic Forces (Fourier Transforms)
If one has several driving forces, $F(t)=\sum_n F_n(t)$, one can find
the particular solution to each $F_n$, $x_{pn}(t)$, and the particular
solution for the entire driving force is
<!-- Equation labels as ordinary links -->
<div id="_auto25"></div>
$$
\begin{equation}
x_p(t)=\sum_nx_{pn}(t).
\label{_auto25} \tag{34}
\end{equation}
$$
This is known as the principle of superposition. It only applies when
the homogenous equation is linear. If there were an anharmonic term
such as $x^3$ in the homogenous equation, then when one summed various
solutions, $x=\sum_n x_n$, the cubic term would generate cross
terms. Superposition is especially useful when $F(t)$ can be written
as a sum of sinusoidal terms, because the solutions for each
sinusoidal (sine or cosine) term is analytic, as we saw above.
Driving forces are often periodic, even when they are not
sinusoidal. Periodicity implies that for some time $\tau$
$$
\begin{eqnarray}
F(t+\tau)=F(t).
\end{eqnarray}
$$
One example of a non-sinusoidal periodic force is a square wave. Many
components in electric circuits are non-linear, e.g. diodes, which
makes many wave forms non-sinusoidal even when the circuits are being
driven by purely sinusoidal sources.
The code here shows a typical example of such a square wave generated using the functionality included in the **scipy** Python package. We have used a period of $\tau=0.2$.
import numpy as np
import math
from scipy import signal
import matplotlib.pyplot as plt
# number of points
n = 500
# start and final times
t0 = 0.0
tn = 1.0
# Period
t = np.linspace(t0, tn, n, endpoint=False)
SqrSignal = np.zeros(n)
SqrSignal = 1.0+signal.square(2*np.pi*5*t)
plt.plot(t, SqrSignal)
plt.ylim(-0.5, 2.5)
plt.show()
For the sinusoidal example studied in the previous subsections the
period is $\tau=2\pi/\omega$. However, higher harmonics can also
satisfy the periodicity requirement. In general, any force that
satisfies the periodicity requirement can be expressed as a sum over
harmonics,
<!-- Equation labels as ordinary links -->
<div id="_auto26"></div>
$$
\begin{equation}
F(t)=\frac{f_0}{2}+\sum_{n>0} f_n\cos(2n\pi t/\tau)+g_n\sin(2n\pi t/\tau).
\label{_auto26} \tag{35}
\end{equation}
$$
From the previous subsection, one can write down the answer for
$x_{pn}(t)$, by substituting $f_n/m$ or $g_n/m$ for $F_0/m$ into Eq.s
([20](#eq:fastdriven1)) or ([21](#eq:fastdriven2)) respectively. By
writing each factor $2n\pi t/\tau$ as $n\omega t$, with $\omega\equiv
2\pi/\tau$,
<!-- Equation labels as ordinary links -->
<div id="eq:fourierdef1"></div>
$$
\begin{equation}
\label{eq:fourierdef1} \tag{36}
F(t)=\frac{f_0}{2}+\sum_{n>0}f_n\cos(n\omega t)+g_n\sin(n\omega t).
\end{equation}
$$
The solutions for $x(t)$ then come from replacing $\omega$ with
$n\omega$ for each term in the particular solution in Equations
([13](#eq:partform)) and ([16](#eq:Ddrive)),
$$
\begin{eqnarray}
x_p(t)&=&\frac{f_0}{2k}+\sum_{n>0} \alpha_n\cos(n\omega t-\delta_n)+\beta_n\sin(n\omega t-\delta_n),\\
\nonumber
\alpha_n&=&\frac{f_n/m}{\sqrt{((n\omega)^2-\omega_0^2)^2+4\beta^2n^2\omega^2}},\\
\nonumber
\beta_n&=&\frac{g_n/m}{\sqrt{((n\omega)^2-\omega_0^2)^2+4\beta^2n^2\omega^2}},\\
\nonumber
\delta_n&=&\tan^{-1}\left(\frac{2\beta n\omega}{\omega_0^2-n^2\omega^2}\right).
\end{eqnarray}
$$
Because the forces have been applied for a long time, any non-zero
damping eliminates the homogenous parts of the solution, so one need
only consider the particular solution for each $n$.
The problem will considered solved if one can find expressions for the
coefficients $f_n$ and $g_n$, even though the solutions are expressed
as an infinite sum. The coefficients can be extracted from the
function $F(t)$ by
<!-- Equation labels as ordinary links -->
<div id="eq:fourierdef2"></div>
$$
\begin{eqnarray}
\label{eq:fourierdef2} \tag{37}
f_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~F(t)\cos(2n\pi t/\tau),\\
\nonumber
g_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~F(t)\sin(2n\pi t/\tau).
\end{eqnarray}
$$
To check the consistency of these expressions and to verify
Eq. ([37](#eq:fourierdef2)), one can insert the expansion of $F(t)$ in
Eq. ([36](#eq:fourierdef1)) into the expression for the coefficients in
Eq. ([37](#eq:fourierdef2)) and see whether
$$
\begin{eqnarray}
f_n&=?&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~\left\{
\frac{f_0}{2}+\sum_{m>0}f_m\cos(m\omega t)+g_m\sin(m\omega t)
\right\}\cos(n\omega t).
\end{eqnarray}
$$
Immediately, one can throw away all the terms with $g_m$ because they
convolute an even and an odd function. The term with $f_0/2$
disappears because $\cos(n\omega t)$ is equally positive and negative
over the interval and will integrate to zero. For all the terms
$f_m\cos(m\omega t)$ appearing in the sum, one can use angle addition
formulas to see that $\cos(m\omega t)\cos(n\omega
t)=(1/2)(\cos[(m+n)\omega t]+\cos[(m-n)\omega t])$. This will integrate
to zero unless $m=n$. In that case the $m=n$ term gives
<!-- Equation labels as ordinary links -->
<div id="_auto27"></div>
$$
\begin{equation}
\int_{-\tau/2}^{\tau/2}dt~\cos^2(m\omega t)=\frac{\tau}{2},
\label{_auto27} \tag{38}
\end{equation}
$$
and
$$
\begin{eqnarray}
f_n&=?&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~f_n/2\\
\nonumber
&=&f_n~\checkmark.
\end{eqnarray}
$$
The same method can be used to check for the consistency of $g_n$.
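The same consistency check can also be done numerically. The sketch below (our own illustration) applies Eq. (37) with a simple numerical integration to a test function whose coefficients are known in advance.
import numpy as np
tau = 2.0
omega = 2*np.pi/tau
t = np.linspace(-tau/2, tau/2, 20001)
# test function with known coefficients: f_0=1, f_1=2, g_2=3
F = 0.5+2.0*np.cos(omega*t)+3.0*np.sin(2*omega*t)
def FourierCoefficient(F, t, n, kind):
    trig = np.cos if kind == 'f' else np.sin
    return 2.0/tau*np.trapz(F*trig(2*n*np.pi*t/tau), t)
print(FourierCoefficient(F, t, 0, 'f'))   # close to 1.0 = f_0
print(FourierCoefficient(F, t, 1, 'f'))   # close to 2.0 = f_1
print(FourierCoefficient(F, t, 2, 'g'))   # close to 3.0 = g_2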
Consider the driving force:
<!-- Equation labels as ordinary links -->
<div id="_auto28"></div>
$$
\begin{equation}
F(t)=At/\tau,~~-\tau/2<t<\tau/2,~~~F(t+\tau)=F(t).
\label{_auto28} \tag{39}
\end{equation}
$$
Find the Fourier coefficients $f_n$ and $g_n$ for all $n$ using Eq. ([37](#eq:fourierdef2)).
Only the odd coefficients enter by symmetry, i.e. $f_n=0$. One can find $g_n$ integrating by parts,
<!-- Equation labels as ordinary links -->
<div id="eq:fouriersolution"></div>
$$
\begin{eqnarray}
\label{eq:fouriersolution} \tag{40}
g_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2}dt~\sin(n\omega t) \frac{At}{\tau}\\
\nonumber
u&=&t,~dv=\sin(n\omega t)dt,~v=-\cos(n\omega t)/(n\omega),\\
\nonumber
g_n&=&\frac{-2A}{n\omega \tau^2}\int_{-\tau/2}^{\tau/2}dt~\cos(n\omega t)
+\left.2A\frac{-t\cos(n\omega t)}{n\omega\tau^2}\right|_{-\tau/2}^{\tau/2}.
\end{eqnarray}
$$
The first term is zero because $\cos(n\omega t)$ will be equally
positive and negative over the interval. Using the fact that
$\omega\tau=2\pi$,
$$
\begin{eqnarray}
g_n&=&-\frac{2A}{2n\pi}\cos(n\omega\tau/2)\\
\nonumber
&=&-\frac{A}{n\pi}\cos(n\pi)\\
\nonumber
&=&\frac{A}{n\pi}(-1)^{n+1}.
\end{eqnarray}
$$
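These coefficients are easy to verify numerically. The sketch below (our own) sums the first twenty terms of the series with $g_n=\frac{A}{n\pi}(-1)^{n+1}$ and compares the partial sum with the sawtooth $F(t)=At/\tau$ itself.
import numpy as np
import matplotlib.pyplot as plt
A, tau = 1.0, 1.0
omega = 2*np.pi/tau
t = np.linspace(-tau/2, tau/2, 1000)
series = np.zeros_like(t)
for n in range(1, 21):
    g_n = A/(n*np.pi)*(-1)**(n+1)
    series += g_n*np.sin(n*omega*t)
plt.plot(t, A*t/tau, label='F(t)=At/tau')
plt.plot(t, series, label='20-term Fourier series')
plt.legend()
plt.show()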
## Fourier Series
More text will come here; chapters 5.7-5.8 of Taylor are discussed
during the lectures. The code here uses the Fourier series discussed
in chapter 5.7 for a square wave signal. The equations for the
coefficients are discussed in Taylor section 5.7, see Example
5.4. The code here visualizes the various approximations given by the
Fourier series compared with a square wave with period $T=0.2$, width
$0.1$ and max value $F=2$. We see that when we increase the number of
components in the Fourier series, the Fourier series approximation gets closer and closer to the square wave signal.
import numpy as np
import math
from scipy import signal
import matplotlib.pyplot as plt
# number of points
n = 500
# start and final times
t0 = 0.0
tn = 1.0
# Period
T =0.2
# Max value of square signal
Fmax= 2.0
# Width of signal
Width = 0.1
t = np.linspace(t0, tn, n, endpoint=False)
SqrSignal = np.zeros(n)
FourierSeriesSignal = np.zeros(n)
SqrSignal = 1.0+signal.square(2*np.pi*5*t+np.pi*Width/T)
a0 = Fmax*Width/T
FourierSeriesSignal = a0
Factor = 2.0*Fmax/np.pi
for i in range(1,500):
FourierSeriesSignal += Factor/(i)*np.sin(np.pi*i*Width/T)*np.cos(i*t*2*np.pi/T)
plt.plot(t, SqrSignal)
plt.plot(t, FourierSeriesSignal)
plt.ylim(-0.5, 2.5)
plt.show()
## Solving differential equations with Fourier series
The material here was discussed during the lecture of February 19 and 21.
It is also covered by Taylor in section 5.8.
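As a concrete illustration (our own sketch, using the square-wave signal from the previous code cell and assumed oscillator parameters with $m=1$), one can build the steady-state response as a superposition over the harmonics of the driving force, using the amplitudes and phase shifts derived above.
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
# assumed oscillator parameters (per unit mass)
omega0, beta = 2.0, 0.2
T = 0.2                      # period of the square-wave drive
w = 2*np.pi/T                # fundamental driving frequency
Fmax, Width = 2.0, 0.1
# one period of the driving force, sampled for numerical Fourier coefficients
tp = np.linspace(-T/2, T/2, 20001)
Fp = 0.5*Fmax*(1.0+signal.square(2*np.pi*tp/T+np.pi*Width/T))
f0 = 2.0/T*np.trapz(Fp, tp)
# steady-state response as a superposition over harmonics
t = np.linspace(0.0, 2*T, 1000)
x_p = f0/(2*omega0**2)*np.ones_like(t)
for n in range(1, 40):
    fn = 2.0/T*np.trapz(Fp*np.cos(n*w*tp), tp)
    gn = 2.0/T*np.trapz(Fp*np.sin(n*w*tp), tp)
    denom = np.sqrt((omega0**2-(n*w)**2)**2+4*beta**2*(n*w)**2)
    delta = np.arctan2(2*beta*n*w, omega0**2-(n*w)**2)
    x_p += (fn*np.cos(n*w*t-delta)+gn*np.sin(n*w*t-delta))/denom
plt.plot(t, x_p)
plt.xlabel('t[s]')
plt.ylabel(r'$x_p(t)$')
plt.show()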
## Response to Transient Force
Consider a particle at rest in the bottom of an underdamped harmonic
oscillator, that then feels a sudden impulse, or change in momentum,
$I=F\Delta t$ at $t=0$. This increases the velocity immediately by an
amount $v_0=I/m$ while not changing the position. One can then solve
the trajectory by solving Eq. ([9](#eq:homogsolution)) with initial
conditions $v_0=I/m$ and $x_0=0$. This gives
<!-- Equation labels as ordinary links -->
<div id="_auto29"></div>
$$
\begin{equation}
x(t)=\frac{I}{m\omega'}e^{-\beta t}\sin\omega't, ~~t>0.
\label{_auto29} \tag{41}
\end{equation}
$$
Here, $\omega'=\sqrt{\omega_0^2-\beta^2}$. For an impulse $I_i$ that
occurs at time $t_i$ the trajectory would be
<!-- Equation labels as ordinary links -->
<div id="_auto30"></div>
$$
\begin{equation}
x(t)=\frac{I_i}{m\omega'}e^{-\beta (t-t_i)}\sin[\omega'(t-t_i)] \Theta(t-t_i),
\label{_auto30} \tag{42}
\end{equation}
$$
where $\Theta(t-t_i)$ is a step function, i.e. $\Theta(x)$ is zero for
$x<0$ and unity for $x>0$. If there were several impulses linear
superposition tells us that we can sum over each contribution,
<!-- Equation labels as ordinary links -->
<div id="_auto31"></div>
$$
\begin{equation}
x(t)=\sum_i\frac{I_i}{m\omega'}e^{-\beta(t-t_i)}\sin[\omega'(t-t_i)]\Theta(t-t_i)
\label{_auto31} \tag{43}
\end{equation}
$$
Now one can consider a series of impulses at times separated by
$\Delta t$, where each impulse is given by $F_i\Delta t$. The sum
above now becomes an integral,
<!-- Equation labels as ordinary links -->
<div id="eq:Greeny"></div>
$$
\begin{eqnarray}\label{eq:Greeny} \tag{44}
x(t)&=&\int_{-\infty}^\infty dt'~F(t')\frac{e^{-\beta(t-t')}\sin[\omega'(t-t')]}{m\omega'}\Theta(t-t')\\
\nonumber
&=&\int_{-\infty}^\infty dt'~F(t')G(t-t'),\\
\nonumber
G(\Delta t)&=&\frac{e^{-\beta\Delta t}\sin[\omega' \Delta t]}{m\omega'}\Theta(\Delta t)
\end{eqnarray}
$$
The quantity
$e^{-\beta(t-t')}\sin[\omega'(t-t')]/m\omega'\Theta(t-t')$ is called a
Green's function, $G(t-t')$. It describes the response at $t$ due to a
force applied at a time $t'$, and is a function of $t-t'$. The step
function ensures that the response does not occur before the force is
applied. One should remember that the form for $G$ would change if the
oscillator were either critically- or over-damped.
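A minimal numerical sketch (our own, with assumed parameters and a hand-rolled discretization of the time axis) of this Green's function and the convolution in Eq. (44), applied to a force that is switched on only during a finite interval.
import numpy as np
import matplotlib.pyplot as plt
m, beta, omega0 = 1.0, 0.1, 1.0       # assumed parameters
omegaprime = np.sqrt(omega0**2-beta**2)
dt = 0.01
t = np.arange(0.0, 60.0, dt)
def G(Dt):
    # Green's function of the underdamped oscillator, zero for Dt < 0
    return np.where(Dt > 0, np.exp(-beta*Dt)*np.sin(omegaprime*Dt)/(m*omegaprime), 0.0)
# a constant force acting only between t=5 and t=10
F = np.where((t > 5.0) & (t < 10.0), 1.0, 0.0)
# discretized convolution x(t) = sum_{t'} F(t') G(t-t') dt'
x = np.array([np.sum(F*G(ti-t))*dt for ti in t])
plt.plot(t, F, label='F(t)')
plt.plot(t, x, label='x(t)')
plt.xlabel('t[s]')
plt.legend()
plt.show()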
When performing the integral in Eq. ([44](#eq:Greeny)) one can use
angle addition formulas to factor out the part with the $t'$
dependence in the integrand,
<!-- Equation labels as ordinary links -->
<div id="eq:Greeny2"></div>
$$
\begin{eqnarray}
\label{eq:Greeny2} \tag{45}
x(t)&=&\frac{1}{m\omega'}e^{-\beta t}\left[I_c(t)\sin(\omega't)-I_s(t)\cos(\omega't)\right],\\
\nonumber
I_c(t)&\equiv&\int_{-\infty}^t dt'~F(t')e^{\beta t'}\cos(\omega't'),\\
\nonumber
I_s(t)&\equiv&\int_{-\infty}^t dt'~F(t')e^{\beta t'}\sin(\omega't').
\end{eqnarray}
$$
If the time $t$ is beyond any time at which the force acts,
$F(t'>t)=0$, the coefficients $I_c$ and $I_s$ become independent of
$t$.
Consider an undamped oscillator ($\beta\rightarrow 0$), with
characteristic frequency $\omega_0$ and mass $m$, that is at rest
until it feels a force described by a Gaussian form,
$$
\begin{eqnarray*}
F(t)&=&F_0 \exp\left\{\frac{-t^2}{2\tau^2}\right\}.
\end{eqnarray*}
$$
For large times ($t>>\tau$), where the force has died off, find
$x(t)$. Solve for the coefficients $I_c$ and $I_s$ in
Eq. ([45](#eq:Greeny2)). Because the Gaussian is an even function,
$I_s=0$, and one need only solve for $I_c$,
$$
\begin{eqnarray*}
I_c&=&F_0\int_{-\infty}^\infty dt'~e^{-t^{\prime 2}/(2\tau^2)}\cos(\omega_0 t')\\
&=&\Re F_0 \int_{-\infty}^\infty dt'~e^{-t^{\prime 2}/(2\tau^2)}e^{i\omega_0 t'}\\
&=&\Re F_0 \int_{-\infty}^\infty dt'~e^{-(t'-i\omega_0\tau^2)^2/(2\tau^2)}e^{-\omega_0^2\tau^2/2}\\
&=&F_0\tau \sqrt{2\pi} e^{-\omega_0^2\tau^2/2}.
\end{eqnarray*}
$$
The third step involved completing the square, and the final step used the fact that the integral
$$
\begin{eqnarray*}
\int_{-\infty}^\infty dx~e^{-x^2/2}&=&\sqrt{2\pi}.
\end{eqnarray*}
$$
To see that this integral is true, consider the square of the integral, which you can change to polar coordinates,
$$
\begin{eqnarray*}
I&=&\int_{-\infty}^\infty dx~e^{-x^2/2}\\
I^2&=&\int_{-\infty}^\infty dxdy~e^{-(x^2+y^2)/2}\\
&=&2\pi\int_0^\infty rdr~e^{-r^2/2}\\
&=&2\pi.
\end{eqnarray*}
$$
Finally, the expression for $x$ from Eq. ([45](#eq:Greeny2)) is
$$
\begin{eqnarray*}
x(t>>\tau)&=&\frac{F_0\tau}{m\omega_0} \sqrt{2\pi} e^{-\omega_0^2\tau^2/2}\sin(\omega_0t).
\end{eqnarray*}
$$
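The value of $I_c$ used here is easy to verify numerically; a quick sketch (our own, with assumed values of $F_0$, $\tau$ and $\omega_0$) compares a direct numerical integration with the closed-form result.
import numpy as np
F0, tau, omega0 = 1.0, 0.5, 2.0        # assumed values
t = np.linspace(-20*tau, 20*tau, 200001)
Ic_numeric = np.trapz(F0*np.exp(-t**2/(2*tau**2))*np.cos(omega0*t), t)
Ic_analytic = F0*tau*np.sqrt(2*np.pi)*np.exp(-omega0**2*tau**2/2)
print(Ic_numeric, Ic_analytic)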
## The classical pendulum and scaling the equations
Let us end our discussion of oscillations with another classical case, the pendulum.
The angular equation of motion of the pendulum is given by
Newton's equation and with no external force it reads
<!-- Equation labels as ordinary links -->
<div id="_auto32"></div>
$$
\begin{equation}
ml\frac{d^2\theta}{dt^2}+mg\sin(\theta)=0,
\label{_auto32} \tag{46}
\end{equation}
$$
with an angular velocity and acceleration given by
<!-- Equation labels as ordinary links -->
<div id="_auto33"></div>
$$
\begin{equation}
v=l\frac{d\theta}{dt},
\label{_auto33} \tag{47}
\end{equation}
$$
and
<!-- Equation labels as ordinary links -->
<div id="_auto34"></div>
$$
\begin{equation}
a=l\frac{d^2\theta}{dt^2}.
\label{_auto34} \tag{48}
\end{equation}
$$
We do however expect that the motion will gradually come to an end due to a viscous drag torque acting on the pendulum.
In the presence of the drag, the above equation becomes
<!-- Equation labels as ordinary links -->
<div id="eq:pend1"></div>
$$
\begin{equation}
ml\frac{d^2\theta}{dt^2}+\nu\frac{d\theta}{dt} +mg\sin(\theta)=0, \label{eq:pend1} \tag{49}
\end{equation}
$$
where $\nu$ is now a positive constant parameterizing the viscosity
of the medium in question. In order to maintain the motion against
viscosity, it is necessary to add some external driving force.
We choose here a periodic driving force. The last equation becomes then
<!-- Equation labels as ordinary links -->
<div id="eq:pend2"></div>
$$
\begin{equation}
ml\frac{d^2\theta}{dt^2}+\nu\frac{d\theta}{dt} +mg\sin(\theta)=A\sin(\omega t), \label{eq:pend2} \tag{50}
\end{equation}
$$
with $A$ and $\omega$ two constants representing the amplitude and
the angular frequency respectively. The latter is called the driving frequency.
We define
$$
\omega_0=\sqrt{g/l},
$$
the so-called natural frequency and the new dimensionless quantities
$$
\hat{t}=\omega_0t,
$$
with the dimensionless driving frequency
$$
\hat{\omega}=\frac{\omega}{\omega_0},
$$
and introducing the quantity $Q$, called the *quality factor*,
$$
Q=\frac{mg}{\omega_0\nu},
$$
and the dimensionless amplitude
$$
\hat{A}=\frac{A}{mg}
$$
## More on the Pendulum
We have
$$
\frac{d^2\theta}{d\hat{t}^2}+\frac{1}{Q}\frac{d\theta}{d\hat{t}}
+\sin(\theta)=\hat{A}\cos(\hat{\omega}\hat{t}).
$$
This equation can in turn be recast in terms of two coupled first-order differential equations as follows
$$
\frac{d\theta}{d\hat{t}}=\hat{v},
$$
and
$$
\frac{d\hat{v}}{d\hat{t}}=-\frac{\hat{v}}{Q}-\sin(\theta)+\hat{A}\cos(\hat{\omega}\hat{t}).
$$
These are the equations to be solved. The factor $Q$ represents the
number of oscillations of the undriven system that must occur before
its energy is significantly reduced due to the viscous drag. The
amplitude $\hat{A}$ is measured in units of the maximum possible
gravitational torque while $\hat{\omega}$ is the angular frequency of
the external torque measured in units of the pendulum's natural
frequency. | cc0-1.0 |
toobaz/pandas | pandas/tests/test_downstream.py | 2 | 4880 | """
Testing that we work in the downstream packages
"""
import importlib
import subprocess
import sys
import numpy as np # noqa
import pytest
from pandas.compat import PY36
from pandas import DataFrame, Series
from pandas.util import testing as tm
def import_module(name):
# we *only* want to skip if the module is truly not available
# and NOT just an actual import error because of pandas changes
if PY36:
try:
return importlib.import_module(name)
except ModuleNotFoundError: # noqa
pytest.skip("skipping as {} not available".format(name))
else:
try:
return importlib.import_module(name)
except ImportError as e:
if "No module named" in str(e) and name in str(e):
pytest.skip("skipping as {} not available".format(name))
raise
@pytest.fixture
def df():
return DataFrame({"A": [1, 2, 3]})
def test_dask(df):
toolz = import_module("toolz") # noqa
dask = import_module("dask") # noqa
import dask.dataframe as dd
ddf = dd.from_pandas(df, npartitions=3)
assert ddf.A is not None
assert ddf.compute() is not None
def test_xarray(df):
xarray = import_module("xarray") # noqa
assert df.to_xarray() is not None
def test_oo_optimizable():
# GH 21071
subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])
@tm.network
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_statsmodels():
statsmodels = import_module("statsmodels") # noqa
import statsmodels.api as sm
import statsmodels.formula.api as smf
df = sm.datasets.get_rdataset("Guerry", "HistData").data
smf.ols("Lottery ~ Literacy + np.log(Pop1831)", data=df).fit()
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_scikit_learn(df):
sklearn = import_module("sklearn") # noqa
from sklearn import svm, datasets
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100.0)
clf.fit(digits.data[:-1], digits.target[:-1])
clf.predict(digits.data[-1:])
# Cython import warning and traitlets
@tm.network
@pytest.mark.filterwarnings("ignore")
def test_seaborn():
seaborn = import_module("seaborn")
tips = seaborn.load_dataset("tips")
seaborn.stripplot(x="day", y="total_bill", data=tips)
def test_pandas_gbq(df):
pandas_gbq = import_module("pandas_gbq") # noqa
@pytest.mark.xfail(reason="0.7.0 pending")
@tm.network
def test_pandas_datareader():
pandas_datareader = import_module("pandas_datareader") # noqa
pandas_datareader.DataReader("F", "quandl", "2017-01-01", "2017-02-01")
# importing from pandas, Cython import warning
@pytest.mark.filterwarnings("ignore:The 'warn':DeprecationWarning")
@pytest.mark.filterwarnings("ignore:pandas.util:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
@pytest.mark.skip(reason="gh-25778: geopandas stack issue")
def test_geopandas():
geopandas = import_module("geopandas") # noqa
fp = geopandas.datasets.get_path("naturalearth_lowres")
assert geopandas.read_file(fp) is not None
def test_geopandas_coordinate_indexer():
# this test is included to have coverage of one case in the indexing.py
# code that is only kept for compatibility with geopandas, see
# https://github.com/pandas-dev/pandas/issues/27258
# We should be able to remove this after some time when its usage is
# removed in geopandas
from pandas.core.indexing import _NDFrameIndexer
class _CoordinateIndexer(_NDFrameIndexer):
def _getitem_tuple(self, tup):
obj = self.obj
xs, ys = tup
return obj[xs][ys]
Series._create_indexer("cx", _CoordinateIndexer)
s = Series(range(5))
res = s.cx[:, :]
tm.assert_series_equal(s, res)
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_pyarrow(df):
pyarrow = import_module("pyarrow") # noqa
table = pyarrow.Table.from_pandas(df)
result = table.to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.xfail(reason="pandas-wheels-50", strict=False)
def test_missing_required_dependency():
# GH 23868
# To ensure proper isolation, we pass these flags
# -S : disable site-packages
# -s : disable user site-packages
# -E : disable PYTHON* env vars, especially PYTHONPATH
# And, that's apparently not enough, so we give up.
# https://github.com/MacPython/pandas-wheels/pull/50
call = ["python", "-sSE", "-c", "import pandas"]
with pytest.raises(subprocess.CalledProcessError) as exc:
subprocess.check_output(call, stderr=subprocess.STDOUT)
output = exc.value.stdout.decode()
for name in ["numpy", "pytz", "dateutil"]:
assert name in output
| bsd-3-clause |