repo_name | path | copies | size | content | license
---|---|---|---|---|---|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/tests/parser/header.py
|
7
|
9186
|
# -*- coding: utf-8 -*-
"""
Tests that the file header is properly handled or inferred
during parsing for all of the parsers defined in parsers.py
"""
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, lrange, u
class HeaderTests(object):
def test_read_with_bad_header(self):
errmsg = r"but only \d+ lines in file"
with tm.assertRaisesRegexp(ValueError, errmsg):
s = StringIO(',,')
self.read_csv(s, header=[10])
def test_bool_header_arg(self):
# see gh-6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with tm.assertRaises(TypeError):
self.read_csv(StringIO(data), header=arg)
with tm.assertRaises(TypeError):
self.read_table(StringIO(data), header=arg)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
tm.assert_almost_equal(df_pref.values, expected)
self.assert_index_equal(df_pref.columns,
Index(['Field0', 'Field1', 'Field2',
'Field3', 'Field4']))
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# INVALID OPTIONS
# no as_recarray
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
self.assertRaises(ValueError, self.read_csv,
StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True,
tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv,
StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'],
tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv,
StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'],
tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv,
StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples(
[('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array(
[[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
[u('r'), u('s'), u('t'),
u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array(
[[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
[u('r'), u('s'), u('t'),
u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array(
[[3, 4, 5, 6], [9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
[u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2], [0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_read_only_header_no_rows(self):
# See gh-7773
expected = DataFrame(columns=['a', 'b', 'c'])
df = self.read_csv(StringIO('a,b,c'))
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO('a,b,c'), index_col=False)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_index_equal(df_pref.columns,
Index(['X0', 'X1', 'X2', 'X3', 'X4']))
self.assert_index_equal(df.columns, Index(lrange(5)))
self.assert_index_equal(df2.columns, Index(names))
|
gpl-3.0
|
glouppe/scikit-learn
|
examples/linear_model/plot_robust_fit.py
|
26
|
2701
|
"""
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
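# A minimal illustrative helper (not part of the original example): the module
# docstring describes judging fits by the median absolute deviation of
# predictions from non-corrupt data, while the plotting loop below reports mean
# squared error. This sketch shows the former; it is never called here.
def median_absolute_deviation_error(y_true, y_pred):
    """Median of |y_true - y_pred|, a robust alternative to squared error."""
    return np.median(np.abs(np.asarray(y_true) - np.asarray(y_pred)))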
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen'}
linestyle = {'OLS': '-', 'Theil-Sen': '-.', 'RANSAC': '--'}
lw = 3
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling Errors Only', X, y),
('Corrupt X, Small Deviants', X_errors, y),
('Corrupt y, Small Deviants', X, y_errors),
('Corrupt X, Large Deviants', X_errors_large, y),
('Corrupt y, Large Deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'b+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot, color=colors[name], linestyle=linestyle[name],
linewidth=lw, label='%s: error = %.3f' % (name, mse))
legend_title = 'Error of Mean\nAbsolute Deviation\nto Non-corrupt Data'
legend = plt.legend(loc='upper right', frameon=False, title=legend_title,
prop=dict(size='x-small'))
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
|
bsd-3-clause
|
Tjorriemorrie/trading
|
16_rf_ma/beta/predict.py
|
4
|
4193
|
import logging
import argparse
import os
import pickle
import pandas as pd
import numpy as np
from prettytable import PrettyTable
from random import random, choice, shuffle, randint
from pprint import pprint
from time import time, sleep
from main import loadData, loadQ, getBackgroundKnowledge, summarizeActions, calculateActions
from world import DATA, PERIODS, getState, getReward
def main(equity, debug):
pips = []
pt = PrettyTable(['Currency', 'min trail', 'date', '1', '2', '3', '4', '5'])
for info in DATA:
currency = info['currency']
min_trail = info['trail']
interval = info['intervals'][0]
pip_mul = info['pip_mul']
logging.warn('{0}...'.format(currency))
actions = calculateActions(min_trail)
df = loadData(currency, interval, 'test')
df = getBackgroundKnowledge(df, PERIODS)
# print df
# break
q = loadQ(currency, interval)
df_last = df[-1:]
row = df_last.iloc[-1]
predictions = predict(df, q, PERIODS, actions, pip_mul, row)
# logging.warn('{0} {1} {2}'.format(currency, row.name, a))
pt.add_row([currency, min_trail, str(row.name).split(' ')[0]] + predictions)
pips.append(int(predictions[0].split(' ')[0].split('-')[1]))
print pt
equity = float(equity)
risk = 0.10
available = equity * risk
logging.info('Risk ${0:.0f} from ${1:.0f} at {2:.0f}%'.format(available, equity, risk * 100))
total_pips = sum(pips)
lot_size = available / total_pips
lot_size /= len(pips)
logging.warn('Lot size = {0:.2f}'.format(lot_size))
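    # Worked example with hypothetical numbers: equity=10000 and risk=0.10 give
    # available=1000; if pips=[50, 25, 25] then total_pips=100, so
    # lot_size = 1000 / 100 / 3 = 3.33 per currency.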
########################################################################################################
# SARSA
########################################################################################################
def predict(df, q, periods, actions, pip_mul, row):
logging.info('Predict: started...')
# state
s = getState(df, periods)
# get top actions
predictions = []
for n in xrange(5):
a, q_sa = getAction(q, s, 0, actions)
a_trade, a_trail = a.split('-')
if a_trade == 'buy':
stop_loss = row['close'] - (float(a_trail) / pip_mul)
else:
stop_loss = row['close'] + (float(a_trail) / pip_mul)
predictions.append('{0} [{1:.4f}] SL:{2:0.4f}'.format(a, q_sa, stop_loss))
logging.info('{0} action = {1}'.format(n, a))
actions.remove(a)
return predictions
def getAction(q, s, epsilon, actions):
logging.info('Action: finding...')
# exploration
if random() < epsilon:
logging.debug('Action: explore (<{0:.2f})'.format(epsilon))
a = choice(actions)
q_max = None
# exploitation
else:
logging.debug('Action: exploit (>{0:.2f})'.format(epsilon))
q_max = None
for action in actions:
q_sa = q.get('|'.join([s, action]), random() * 10.)
logging.debug('Qsa action {0} is {1:.4f}'.format(action, q_sa))
if q_sa > q_max:
q_max = q_sa
a = action
logging.info('Action: found {0}'.format(a))
return a, q_max
def getDelta(q, s, a, r):
logging.info('Delta: calculating...')
q_sa = q.get('|'.join([s, a]), 0)
logging.debug('Delta: r [{0:.4f}] - Qsa [{1:0.4f}]'.format(r, q_sa))
d = r - q_sa
logging.info('Delta: {0:.4f}'.format(d))
return d
########################################################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('equity')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-vv', '--very_verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
very_verbose = args.very_verbose
lvl = logging.DEBUG if very_verbose else (logging.INFO if verbose else logging.WARN)
logging.basicConfig(
level=lvl,
format='%(asctime)s %(name)-8s %(levelname)-8s %(message)s',
# datefmt='%Y-%m-%d %H:%M:',
)
debug = verbose or very_verbose
main(args.equity, debug)
|
mit
|
google-research/language
|
language/serene/analysis.py
|
1
|
10366
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Analyze and interpret model results.
This script does the following:
1. Reads TFDS examples.
2. Reads a model's predictions (formatted like below).
3. Merges these two so that it is easy to find the evidence the model scored
by claim_id.
4. Compute counts by different factors and output the rank of the first correct
prediction.
5. Write 3 and 4 to disk.
Model predictions are formatted like:
{'claim_prob': [0.7085524797439575, 0.15355344116687775, 0.13789407908916473],
'evidence_prob': [0.0008763210498727858],
'metadata': {'claim_id': 107511,
'claim_label': 'REFUTES',
'evidence_label': 'NOT_MATCHING',
'doc_score': -1,
'sentence_score': -20,
'scrape_type': 'ukp_pred',
'gold': False,
'retrieved': True,
'background': False,
'tfidf_candidate': True,
'wikipedia_url': 'Deighton',
'sentence_id': 20}}
"""
import json
import pathlib
import pickle
import sys
from typing import Optional
from absl import app
from absl import flags
from absl import logging
import dataclasses
from language.serene import config
from language.serene import constants
from language.serene import fever_tfds
from language.serene import types
from language.serene import util
import pandas as pd
import tensorflow.compat.v2 as tf
import tqdm
FLAGS = flags.FLAGS
flags.DEFINE_string('model_root', None, 'Directory of model to analyze.')
flags.DEFINE_string('report_dir', None, 'Directory to write report files.')
flags.DEFINE_integer('n_similar_negatives', 0,
'TFDS number of similar negatives.')
flags.DEFINE_integer('n_background_negatives', 10,
'TFDS number of background negatives.')
flags.DEFINE_string('train_scrape_type', 'lucene',
'Scrape used during training.')
@dataclasses.dataclass(frozen=True, eq=True)
class ClaimPrediction:
refute_probability: Optional[float]
support_probability: Optional[float]
not_enough_info_probability: Optional[float]
def get_match_score(model_predictions,
example_id):
if example_id in model_predictions:
return model_predictions[example_id]['evidence_prob'][0]
else:
return None
def get_verify_scores(model_predictions,
example_id):
if example_id in model_predictions:
scores = model_predictions[example_id]['claim_prob']
return ClaimPrediction(scores[0], scores[1], scores[2])
else:
return ClaimPrediction(None, None, None)
def make_example_id(*, claim_id, wikipedia_url,
sentence_id, scrape_type):
"""Create a string example id for claim-evidence pairs.
Args:
claim_id: Fever claim id
wikipedia_url: The wikipedia url of the evidence
sentence_id: The sentence id of the evidence
scrape_type: The scrape that this evidence came from
Returns:
A string example id
"""
return f'{claim_id}@{wikipedia_url}@{sentence_id}@{scrape_type}'
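# Illustrative usage, built from the sample prediction in the module docstring
# above (these are example values, not fixed constants):
#   make_example_id(claim_id=107511, wikipedia_url='Deighton',
#                   sentence_id=20, scrape_type='ukp_pred')
#   returns '107511@Deighton@20@ukp_pred'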
def parse_fold(*, fold_name, model_predictions,
tfds_examples):
"""Parse the examples in the model predictions.
Args:
fold_name: Name of fold that examples in rows are from
model_predictions: Map from example_id to predictions
tfds_examples: Examples from fever TFDS
Returns:
Dataframe merging the examples and predictions
"""
output_rows = []
for example in tqdm.tqdm(tfds_examples, mininterval=10):
meta = json.loads(util.tf_to_str(example['metadata']))
claim_id = meta['claim_id']
scrape_type = util.tf_to_str(example['scrape_type'])
wikipedia_url = util.tf_to_str(example['wikipedia_url'])
sentence_id = util.tf_to_str(example['sentence_id'])
ex_id = make_example_id(
claim_id=claim_id,
wikipedia_url=wikipedia_url,
sentence_id=sentence_id,
scrape_type=scrape_type,
)
model_score = get_match_score(model_predictions, ex_id)
verify_scores = get_verify_scores(model_predictions, ex_id)
# pyformat: disable
output_rows.append({
'evidence_label': constants.EVIDENCE_MATCHING_CLASSES[
example['evidence_label'].numpy()],
'claim_label': constants.FEVER_CLASSES[example['claim_label'].numpy()],
'scrape_type': scrape_type,
'wikipedia_url': wikipedia_url,
'sentence_id': sentence_id,
'retrieved': meta['retrieved'],
'gold': meta['gold'],
'background': meta['background'],
'sentence_score': meta['sentence_score'],
'doc_score': meta['doc_score'],
'tfidf_candidate': meta['tfidf_candidate'],
'claim_text': util.tf_to_str(example['claim_text']),
'evidence_text': util.tf_to_str(example['evidence_text']),
'claim_id': meta['claim_id'],
'model_score': model_score,
'refute_score': verify_scores.refute_probability,
'support_score': verify_scores.support_probability,
'nei_score': verify_scores.not_enough_info_probability,
'fold': fold_name,
})
# pyformat: enable
df = pd.DataFrame(output_rows)
return df
def read_model_predictions(
prediction_path):
"""Read a model's validation predictions and convert to a dictionary.
Args:
prediction_path: Path to read predictions from
Returns:
A dictionary where values are predictions, and keys are composed of
the claim_id/wikipedia_url/sentence_id/scrape_type
"""
model_predictions = util.read_json(prediction_path)
id_to_predictions = {}
for pred in model_predictions['predictions']:
claim_id = pred['metadata']['claim_id']
scrape_type = pred['metadata']['scrape_type']
wikipedia_url = pred['metadata']['wikipedia_url']
sentence_id = pred['metadata']['sentence_id']
identifier = make_example_id(
claim_id=claim_id,
wikipedia_url=wikipedia_url,
sentence_id=sentence_id,
scrape_type=scrape_type,
)
id_to_predictions[identifier] = pred
return id_to_predictions
def write_summary(report_dir, df):
"""Write summary statistics about the examples in df.
For example, this will output how many of each label there is of each type.
Args:
report_dir: Directory to write summary to
df: Dataframe of examples (claim/evidence pairs) to summarize
"""
pd.set_option('display.max_colwidth', 300)
with util.safe_open(report_dir / 'summary.txt', 'w') as f:
f.write('Counts of examples by Evidence label\n')
f.write(str(df.groupby('evidence_label').count()))
f.write('\n')
f.write('Count of examples by fold/scrape/evidence label/claim label\n')
f.write(
str(
df.groupby(['fold', 'scrape_type', 'evidence_label',
'claim_label']).count()))
f.write('\n')
f.write('Detailed Count of examples\n')
f.write(
str(
df.groupby([
'fold', 'scrape_type', 'evidence_label', 'claim_label',
'retrieved', 'gold', 'tfidf_candidate'
]).count()))
def write_per_claim_analysis(*, df,
claim_lookup,
output_path):
"""For each claim, write the examples the model scored and claim summary.
Args:
df: Dataframe to read predictions and examples from
claim_lookup: Lookup from claim_id to fever claim dictionary
output_path: Path to write analysis to
"""
claim_predictions = {}
grouped_df = df.groupby(['scrape_type', 'claim_id'])
for (scrape_type, claim_id), claim_df in tqdm.tqdm(
grouped_df, mininterval=10):
label = claim_lookup[claim_id]['label']
claim_df = claim_df.sort_values('model_score', ascending=False)
claim_df = claim_df[[
'gold', 'tfidf_candidate', 'model_score', 'support_score',
'refute_score', 'nei_score', 'wikipedia_url', 'sentence_id',
'evidence_text'
]]
recall_rank = sys.maxsize
for rank, row in enumerate(claim_df.itertuples(), start=1):
if row.gold:
recall_rank = rank
break
claim_predictions[(scrape_type, claim_id)] = {
'df': claim_df,
'claim_id': claim_id,
'scrape_type': scrape_type,
'label': label,
'rank': recall_rank,
}
with util.safe_open(output_path, 'wb') as f:
pickle.dump(claim_predictions, f)
def main(_):
tf.enable_v2_behavior()
flags.mark_flag_as_required('model_root')
flags.mark_flag_as_required('report_dir')
root = pathlib.Path(FLAGS.model_root)
report_dir = pathlib.Path(FLAGS.report_dir)
logging.info('Reading predictions from model_root: %s', root)
logging.info('Will write analysis to: %s', report_dir)
  # Config() contains non-model specific configuration, which is why it's
# fine to use this instead of the model's configuration.
conf = config.Config()
dev = {c['id']: c for c in util.read_jsonlines(conf.fever_dev)}
logging.info('Reading fever TFDS examples')
builder = fever_tfds.FeverEvidence(
data_dir=util.readahead(conf.fever_evidence_tfds_data),
n_similar_negatives=FLAGS.n_similar_negatives,
n_background_negatives=FLAGS.n_background_negatives,
train_scrape_type=FLAGS.train_scrape_type,
include_not_enough_info=True,
title_in_scoring=True)
val = builder.as_dataset(split='validation')
val_tfds_examples = [x for x in tqdm.tqdm(val, mininterval=10)]
logging.info('Reading model predictions')
model_predictions = read_model_predictions(root / 'val_predictions.json')
val_df = parse_fold(
fold_name='val',
model_predictions=model_predictions,
tfds_examples=val_tfds_examples)
df = pd.concat([val_df])
logging.info('Writing analysis to disk')
write_summary(report_dir, df)
write_per_claim_analysis(
output_path=report_dir / 'claim_evidence_predictions.pickle',
df=df,
claim_lookup=dev)
if __name__ == '__main__':
app.run(main)
|
apache-2.0
|
researchstudio-sat/wonpreprocessing
|
python-processing/tools/lda.py
|
1
|
1427
|
from gensim import matutils
from gensim.models import LdaModel
from gensim.models.hdpmodel import HdpModel
from sklearn.feature_extraction.text import TfidfVectorizer
from tools.datasets import dataset_mails
# TODO: move to scripts
def fit_lda(corpus, vocabulary, n_topics=10, passes=1):
return LdaModel(corpus, num_topics=n_topics, passes=passes,
id2word={i: s for i, s in enumerate(vocabulary)})
def fit_hdp_lda(corpus, vocabulary):
return HdpModel(corpus, {i: s for i, s in enumerate(vocabulary)})
if __name__ == '__main__':
content, input_type, tokenizer = dataset_mails(
'/Users/yanchith/workspace/won-corpora/processed')
# content, input_type, tokenizer = dataset_newsgroups()
vectorizer = TfidfVectorizer(min_df=3, input=input_type, ngram_range=(1, 1),
stop_words='english', tokenizer=tokenizer)
X = vectorizer.fit_transform(content)
features = vectorizer.get_feature_names()
print('Number of features:', len(features))
print('Bag of words shape:', X.shape)
print(features)
# Beware, gensim requires the matrix transposed
model = fit_hdp_lda(matutils.Sparse2Corpus(X, documents_columns=False),
features)
n_topics_to_show = 200
for topic in model.show_topics(topics=n_topics_to_show, topn=10,
formatted=True):
print(topic)
|
apache-2.0
|
musically-ut/statsmodels
|
statsmodels/tools/pca.py
|
25
|
31232
|
"""Principal Component Analysis
Author: josef-pktd
Modified by Kevin Sheppard
"""
from __future__ import print_function, division
import numpy as np
import pandas as pd
from statsmodels.compat.python import range
from statsmodels.compat.numpy import nanmean
def _norm(x):
return np.sqrt(np.sum(x * x))
class PCA(object):
"""
Principal Component Analysis
Parameters
----------
data : array-like
Variables in columns, observations in rows
ncomp : int, optional
        Number of components to return. If None, returns as many as the
        smaller of the number of rows or columns in data
    standardize : bool, optional
        Flag indicating whether to use standardized data with mean 0 and unit
        variance. standardize being True implies demean. Using standardized
data is equivalent to computing principal components from the
correlation matrix of data
demean : bool, optional
Flag indicating whether to demean data before computing principal
components. demean is ignored if standardize is True. Demeaning data
but not standardizing is equivalent to computing principal components
from the covariance matrix of data
    normalize : bool, optional
        Indicates whether to normalize the factors to have unit inner product.
If False, the loadings will have unit inner product.
weights : array, optional
Series weights to use after transforming data according to standardize
or demean when computing the principal components.
gls : bool, optional
Flag indicating to implement a two-step GLS estimator where
in the first step principal components are used to estimate residuals,
and then the inverse residual variance is used as a set of weights to
estimate the final principal components. Setting gls to True requires
        ncomp to be less than the min of the number of rows or columns
method : str, optional
Sets the linear algebra routine used to compute eigenvectors
'svd' uses a singular value decomposition (default).
'eig' uses an eigenvalue decomposition of a quadratic form
'nipals' uses the NIPALS algorithm and can be faster than SVD when
ncomp is small and nvars is large. See notes about additional changes
when using NIPALS
tol : float, optional
Tolerance to use when checking for convergence when using NIPALS
max_iter : int, optional
Maximum iterations when using NIPALS
missing : string
Method for missing data. Choices are
'drop-row' - drop rows with missing values
'drop-col' - drop columns with missing values
'drop-min' - drop either rows or columns, choosing by data retention
'fill-em' - use EM algorithm to fill missing value. ncomp should be
set to the number of factors required
tol_em : float
Tolerance to use when checking for convergence of the EM algorithm
max_em_iter : int
Maximum iterations for the EM algorithm
Attributes
----------
factors : array or DataFrame
        nobs by ncomp array of principal components (scores)
    scores : array or DataFrame
        nobs by ncomp array of principal components - identical to factors
loadings : array or DataFrame
ncomp by nvar array of principal component loadings for constructing
the factors
coeff : array or DataFrame
nvar by ncomp array of principal component loadings for constructing
the projections
projection : array or DataFrame
        nobs by nvar array containing the projection of the data onto the ncomp
estimated factors
rsquare : array or Series
ncomp array where the element in the ith position is the R-square
        of including the first i principal components. Note: values are
calculated on the transformed data, not the original data
ic : array or DataFrame
        ncomp by 3 array containing the Bai and Ng (2002) Information
criteria. Each column is a different criteria, and each row
represents the number of included factors.
eigenvals : array or Series
nvar array of eigenvalues
eigenvecs : array or DataFrame
nvar by nvar array of eigenvectors
weights : array
nvar array of weights used to compute the principal components,
normalized to unit length
transformed_data : array
Standardized, demeaned and weighted data used to compute
principal components and related quantities
cols : array
Array of indices indicating columns used in the PCA
rows : array
Array of indices indicating rows used in the PCA
Methods
-------
plot_scree
Scree plot of the eigenvalues
plot_rsquare
Individual series R-squared plotted against the number of factors
project
Compute projection for a given number of factors
Examples
--------
Basic PCA using the correlation matrix of the data
>>> import numpy as np
>>> from statsmodels.tools.pca import PCA
>>> x = np.random.randn(100)[:, None]
    >>> x = x + np.random.randn(100, 100)
>>> pc = PCA(x)
Note that the principal components are computed using a SVD and so the
correlation matrix is never constructed, unless method='eig'.
PCA using the covariance matrix of the data
>>> pc = PCA(x, standardize=False)
Limiting the number of factors returned to 1 computed using NIPALS
>>> pc = PCA(x, ncomp=1, method='nipals')
>>> pc.factors.shape
(100, 1)
Notes
-----
The default options perform principal component analysis on the
    demeaned, unit variance version of data. Setting standardize to False will
    instead only demean, and setting both standardize and
    demean to False will not alter the data.
    Once the data have been transformed, the following relationships hold when
    the number of components (ncomp) is the same as the minimum of the number
    of observations or the number of variables.
.. math:
X' X = V \\Lambda V'
.. math:
F = X V
.. math:
X = F V'
where X is the `data`, F is the array of principal components (`factors`
or `scores`), and V is the array of eigenvectors (`loadings`) and V' is
the array of factor coefficients (`coeff`).
When weights are provided, the principal components are computed from the
modified data
.. math:
\\Omega^{-\\frac{1}{2}} X
where :math:`\\Omega` is a diagonal matrix composed of the weights. For
example, when using the GLS version of PCA, the elements of :math:`\\Omega`
will be the inverse of the variances of the residuals from
.. math:
X - F V'
where the number of factors is less than the rank of X
.. [1] J. Bai and S. Ng, "Determining the number of factors in approximate
factor models," Econometrica, vol. 70, number 1, pp. 191-221, 2002
"""
def __init__(self, data, ncomp=None, standardize=True, demean=True,
normalize=True, gls=False, weights=None, method='svd',
missing=None, tol=5e-8, max_iter=1000, tol_em=5e-8,
max_em_iter=100, ):
self._index = None
self._columns = []
if isinstance(data, pd.DataFrame):
self._index = data.index
self._columns = data.columns
self.data = np.asarray(data)
# Store inputs
self._gls = gls
self._normalize = normalize
self._tol = tol
if not 0 < self._tol < 1:
raise ValueError('tol must be strictly between 0 and 1')
self._max_iter = max_iter
self._max_em_iter = max_em_iter
self._tol_em = tol_em
# Prepare data
self._standardize = standardize
self._demean = demean
self._nobs, self._nvar = self.data.shape
if weights is None:
weights = np.ones(self._nvar)
else:
weights = np.array(weights).flatten()
if weights.shape[0] != self._nvar:
raise ValueError('weights should have nvar elements')
weights = weights / np.sqrt((weights ** 2.0).mean())
self.weights = weights
# Check ncomp against maximum
min_dim = min(self._nobs, self._nvar)
self._ncomp = min_dim if ncomp is None else ncomp
if self._ncomp > min_dim:
import warnings
warn = 'The requested number of components is more than can be ' \
'computed from data. The maximum number of components is ' \
'the minimum of the number of observations or variables'
warnings.warn(warn)
self._ncomp = min_dim
self._method = method
if self._method == 'eig':
self._compute_eig = self._compute_using_eig
elif self._method == 'svd':
self._compute_eig = self._compute_using_svd
elif self._method == 'nipals':
self._compute_eig = self._compute_using_nipals
else:
raise ValueError('method is not known.')
self.rows = np.arange(self._nobs)
self.cols = np.arange(self._nvar)
# Handle missing
self._missing = missing
self._adjusted_data = self.data
if missing is not None:
self._adjust_missing()
# Update size
self._nobs, self._nvar = self._adjusted_data.shape
if self._ncomp == np.min(self.data.shape):
self._ncomp = np.min(self._adjusted_data.shape)
elif self._ncomp > np.min(self._adjusted_data.shape):
raise ValueError('When adjusting for missing values, user '
'provided ncomp must be no larger than the '
'smallest dimension of the '
'missing-value-adjusted data size.')
# Attributes and internal values
self._tss = 0.0
self._ess = None
self.transformed_data = None
self._mu = None
self._sigma = None
self._ess_indiv = None
self._tss_indiv = None
self.scores = self.factors = None
self.loadings = None
self.coeff = None
self.eigenvals = None
self.eigenvecs = None
self.projection = None
self.rsquare = None
self.ic = None
# Prepare data
self.transformed_data = self._prepare_data()
# Perform the PCA
self._pca()
if gls:
self._compute_gls_weights()
self.transformed_data = self._prepare_data()
self._pca()
# Final calculations
self._compute_rsquare_and_ic()
if self._index is not None:
self._to_pandas()
def _adjust_missing(self):
"""
Implements alternatives for handling missing values
"""
def keep_col(x):
index = np.logical_not(np.any(np.isnan(x), 0))
return x[:, index], index
def keep_row(x):
index = np.logical_not(np.any(np.isnan(x), 1))
return x[index, :], index
if self._missing == 'drop-col':
self._adjusted_data, index = keep_col(self.data)
self.cols = np.where(index)[0]
self.weights = self.weights[index]
elif self._missing == 'drop-row':
self._adjusted_data, index = keep_row(self.data)
self.rows = np.where(index)[0]
elif self._missing == 'drop-min':
drop_col, drop_col_index = keep_col(self.data)
drop_col_size = drop_col.size
drop_row, drop_row_index = keep_row(self.data)
drop_row_size = drop_row.size
if drop_row_size > drop_col_size:
self._adjusted_data = drop_row
self.rows = np.where(drop_row_index)[0]
else:
self._adjusted_data = drop_col
self.weights = self.weights[drop_col_index]
self.cols = np.where(drop_col_index)[0]
elif self._missing == 'fill-em':
self._adjusted_data = self._fill_missing_em()
else:
raise ValueError('missing method is not known.')
# Check adjusted data size
if self._adjusted_data.size == 0:
raise ValueError('Removal of missing values has eliminated all data.')
def _compute_gls_weights(self):
"""
Computes GLS weights based on percentage of data fit
"""
errors = self.transformed_data - np.asarray(self.projection)
if self._ncomp == self._nvar:
raise ValueError('gls can only be used when ncomp < nvar '
'so that residuals have non-zero variance')
var = (errors ** 2.0).mean(0)
weights = 1.0 / var
weights = weights / np.sqrt((weights ** 2.0).mean())
nvar = self._nvar
eff_series_perc = (1.0 / sum((weights / weights.sum()) ** 2.0)) / nvar
if eff_series_perc < 0.1:
eff_series = int(np.round(eff_series_perc * nvar))
import warnings
warn = 'Many series are being down weighted by GLS. Of the ' \
'{original} series, the GLS estimates are based on only ' \
'{effective} (effective) ' \
'series.'.format(original=nvar, effective=eff_series)
warnings.warn(warn)
self.weights = weights
def _pca(self):
"""
Main PCA routine
"""
self._compute_eig()
self._compute_pca_from_eig()
self.projection = self.project()
def __repr__(self):
string = self.__str__()
string = string[:-1]
string += ', id: ' + hex(id(self)) + ')'
return string
def __str__(self):
string = 'Principal Component Analysis('
string += 'nobs: ' + str(self._nobs) + ', '
string += 'nvar: ' + str(self._nvar) + ', '
if self._standardize:
kind = 'Standardize (Correlation)'
elif self._demean:
kind = 'Demean (Covariance)'
else:
kind = 'None'
string += 'transformation: ' + kind + ', '
if self._gls:
string += 'GLS, '
string += 'normalization: ' + str(self._normalize) + ', '
string += 'number of components: ' + str(self._ncomp) + ', '
        string += 'method: ' + ('Eigenvalue' if self._method == 'eig' else 'SVD')
string += ')'
return string
def _prepare_data(self):
"""
Standardize or demean data.
"""
adj_data = self._adjusted_data
if np.all(np.isnan(adj_data)):
            # np.empty(...).fill() returns None, so build the NaN array explicitly
            nan_row = np.empty(adj_data.shape[1])
            nan_row.fill(np.nan)
            return nan_row
self._mu = nanmean(adj_data, axis=0)
self._sigma = np.sqrt(nanmean((adj_data - self._mu) ** 2.0, axis=0))
if self._standardize:
data = (adj_data - self._mu) / self._sigma
elif self._demean:
data = (adj_data - self._mu)
else:
data = adj_data
return data / np.sqrt(self.weights)
def _compute_using_svd(self):
"""SVD method to compute eigenvalues and eigenvecs"""
x = self.transformed_data
u, s, v = np.linalg.svd(x)
self.eigenvals = s ** 2.0
self.eigenvecs = v.T
def _compute_using_eig(self):
"""
Eigenvalue decomposition method to compute eigenvalues and eigenvectors
"""
x = self.transformed_data
self.eigenvals, self.eigenvecs = np.linalg.eigh(x.T.dot(x))
def _compute_using_nipals(self):
"""
        NIPALS implementation to compute a small number of eigenvalues and eigenvectors
"""
x = self.transformed_data
if self._ncomp > 1:
x = x + 0.0 # Copy
tol, max_iter, ncomp = self._tol, self._max_iter, self._ncomp
vals = np.zeros(self._ncomp)
vecs = np.zeros((self._nvar, self._ncomp))
for i in range(ncomp):
max_var_ind = np.argmax(x.var(0))
factor = x[:, [max_var_ind]]
_iter = 0
diff = 1.0
while diff > tol and _iter < max_iter:
vec = x.T.dot(factor) / (factor.T.dot(factor))
vec = vec / np.sqrt(vec.T.dot(vec))
factor_last = factor
factor = x.dot(vec) / (vec.T.dot(vec))
diff = _norm(factor - factor_last) / _norm(factor)
_iter += 1
vals[i] = (factor ** 2).sum()
vecs[:, [i]] = vec
if ncomp > 1:
x -= factor.dot(vec.T)
self.eigenvals = vals
self.eigenvecs = vecs
def _fill_missing_em(self):
"""
EM algorithm to fill missing values
"""
non_missing = np.logical_not(np.isnan(self.data))
# If nothing missing, return without altering the data
if np.all(non_missing):
return self.data
# 1. Standardized data as needed
data = self.transformed_data = self._prepare_data()
ncomp = self._ncomp
# 2. Check for all nans
col_non_missing = np.sum(non_missing, 1)
row_non_missing = np.sum(non_missing, 0)
if np.any(col_non_missing < ncomp) or np.any(row_non_missing < ncomp):
raise ValueError('Implementation requires that all columns and '
'all rows have at least ncomp non-missing values')
# 3. Get mask
mask = np.isnan(data)
# 4. Compute mean
mu = nanmean(data, 0)
# 5. Replace missing with mean
projection = np.ones((self._nobs, 1)) * mu
projection_masked = projection[mask]
data[mask] = projection_masked
# 6. Compute eigenvalues and fit
diff = 1.0
_iter = 0
while diff > self._tol_em and _iter < self._max_em_iter:
last_projection_masked = projection_masked
# Set transformed data to compute eigenvalues
self.transformed_data = data
# Call correct eig function here
self._compute_eig()
# Call function to compute factors and projection
self._compute_pca_from_eig()
projection = self.project(transform=False, unweight=False)
projection_masked = projection[mask]
data[mask] = projection_masked
delta = last_projection_masked - projection_masked
diff = _norm(delta) / _norm(projection_masked)
_iter += 1
# Must copy to avoid overwriting original data since replacing values
data = self._adjusted_data + 0.0
projection = self.project()
data[mask] = projection[mask]
return data
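    # Typical usage of the EM fill (a sketch; the data and ncomp values are
    # hypothetical): PCA(data_with_nans, ncomp=2, missing='fill-em'), with
    # ncomp set to the number of factors required, as noted in the docstring.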
def _compute_pca_from_eig(self):
"""
Compute relevant statistics after eigenvalues have been computed
"""
# Ensure sorted largest to smallest
vals, vecs = self.eigenvals, self.eigenvecs
indices = np.argsort(vals)
indices = indices[::-1]
vals = vals[indices]
vecs = vecs[:, indices]
if (vals <= 0).any():
# Discard and warn
num_good = vals.shape[0] - (vals <= 0).sum()
if num_good < self._ncomp:
import warnings
                warn = 'Only {num:d} eigenvalues are positive. This is the ' \
'maximum number of components that can be extracted.'
warnings.warn(warn.format(num=num_good))
self._ncomp = num_good
vals[num_good:] = np.finfo(np.float64).tiny
# Use ncomp for the remaining calculations
vals = vals[:self._ncomp]
vecs = vecs[:, :self._ncomp]
self.eigenvals, self.eigenvecs = vals, vecs
# Select correct number of components to return
self.scores = self.factors = self.transformed_data.dot(vecs)
self.loadings = vecs
self.coeff = vecs.T
if self._normalize:
self.coeff = (self.coeff.T * np.sqrt(vals)).T
self.factors /= np.sqrt(vals)
self.scores = self.factors
def _compute_rsquare_and_ic(self):
"""
Final statistics to compute
"""
# TSS and related calculations
# TODO: This needs careful testing, with and without weights, gls, standardized and demean
weights = self.weights
ss_data = self.transformed_data * np.sqrt(weights)
self._tss_indiv = np.sum(ss_data ** 2, 0)
self._tss = np.sum(self._tss_indiv)
self._ess = np.zeros(self._ncomp + 1)
self._ess_indiv = np.zeros((self._ncomp + 1, self._nvar))
for i in range(self._ncomp + 1):
# Projection in the same space as transformed_data
projection = self.project(ncomp=i, transform=False, unweight=False)
indiv_rss = (projection ** 2).sum(axis=0)
rss = indiv_rss.sum()
self._ess[i] = self._tss - rss
self._ess_indiv[i, :] = self._tss_indiv - indiv_rss
self.rsquare = 1.0 - self._ess / self._tss
# Information Criteria
ess = self._ess
invalid = ess <= 0 # Prevent log issues of 0
if invalid.any():
last_obs = (np.where(invalid)[0]).min()
ess = ess[:last_obs]
log_ess = np.log(ess)
r = np.arange(ess.shape[0])
nobs, nvar = self._nobs, self._nvar
sum_to_prod = (nobs + nvar) / (nobs * nvar)
min_dim = min(nobs, nvar)
penalties = np.array([sum_to_prod * np.log(1.0 / sum_to_prod),
sum_to_prod * np.log(min_dim),
np.log(min_dim) / min_dim])
penalties = penalties[:, None]
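        # These three penalty rows correspond to the Bai and Ng criteria stored
        # in ``ic`` (IC_p1, IC_p2, IC_p3): for k factors the penalties are
        # k*((n+v)/(n*v))*log(n*v/(n+v)), k*((n+v)/(n*v))*log(min(n, v)) and
        # k*log(min(n, v))/min(n, v), each added to log(ESS_k).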
ic = log_ess + r * penalties
self.ic = ic.T
def project(self, ncomp=None, transform=True, unweight=True):
"""
Project series onto a specific number of factors
Parameters
----------
        ncomp : int, optional
            Number of components to use. If omitted, all components
            initially computed are used.
        transform : bool
            Flag indicating whether to return the projection in the original
            space of the data (True, default) or in the space of the
            standardized/demeaned data
        unweight : bool
            Flag indicating whether to undo the effects of the estimation
            weights
        Returns
        -------
        projection : array
            nobs by nvar array of the projection onto ncomp factors
Notes
-----
"""
# Projection needs to be scaled/shifted based on inputs
ncomp = self._ncomp if ncomp is None else ncomp
if ncomp > self._ncomp:
raise ValueError('ncomp must be smaller than the number of '
'components computed.')
factors = np.asarray(self.factors)
coeff = np.asarray(self.coeff)
projection = factors[:, :ncomp].dot(coeff[:ncomp, :])
if transform or unweight:
projection *= np.sqrt(self.weights)
if transform:
# Remove the weights, which do not depend on transformation
if self._standardize:
projection *= self._sigma
if self._standardize or self._demean:
projection += self._mu
if self._index is not None:
projection = pd.DataFrame(projection,
columns=self._columns,
index=self._index)
return projection
def _to_pandas(self):
"""
Returns pandas DataFrames for all values
"""
index = self._index
# Principal Components
num_zeros = np.ceil(np.log10(self._ncomp))
comp_str = 'comp_{0:0' + str(int(num_zeros)) + 'd}'
cols = [comp_str.format(i) for i in range(self._ncomp)]
df = pd.DataFrame(self.factors, columns=cols, index=index)
self.scores = self.factors = df
# Projections
df = pd.DataFrame(self.projection,
columns=self._columns,
index=index)
self.projection = df
# Weights
df = pd.DataFrame(self.coeff, index=cols, columns=self._columns)
self.coeff = df
# Loadings
df = pd.DataFrame(self.loadings, index=self._columns, columns=cols)
self.loadings = df
# eigenvals
self.eigenvals = pd.Series(self.eigenvals)
self.eigenvals.name = 'eigenvals'
# eigenvecs
vec_str = comp_str.replace('comp', 'eigenvec')
cols = [vec_str.format(i) for i in range(self.eigenvecs.shape[1])]
self.eigenvecs = pd.DataFrame(self.eigenvecs, columns=cols)
# R2
self.rsquare = pd.Series(self.rsquare)
self.rsquare.index.name = 'ncomp'
self.rsquare.name = 'rsquare'
# IC
self.ic = pd.DataFrame(self.ic, columns=['IC_p1', 'IC_p2', 'IC_p3'])
self.ic.index.name = 'ncomp'
def plot_scree(self, ncomp=None, log_scale=True, cumulative=False, ax=None):
"""
Plot of the ordered eigenvalues
Parameters
----------
ncomp : int, optional
            Number of components to include in the plot. If None, will
            include the same as the number of components computed
        log_scale : bool, optional
            Flag indicating whether to use a log scale for the y-axis
cumulative : bool, optional
Flag indicating whether to plot the eigenvalues or cumulative
eigenvalues
ax : Matplotlib axes instance, optional
            An axes on which to draw the graph. If omitted, a new figure
is created
Returns
-------
fig : figure
Handle to the figure
"""
import statsmodels.graphics.utils as gutils
fig, ax = gutils.create_mpl_ax(ax)
ncomp = self._ncomp if ncomp is None else ncomp
vals = np.asarray(self.eigenvals)
vals = vals[:self._ncomp]
if cumulative:
vals = np.cumsum(vals)
if log_scale:
ax.set_yscale('log')
ax.plot(np.arange(ncomp), vals[: ncomp], 'bo')
ax.autoscale(tight=True)
xlim = np.array(ax.get_xlim())
sp = xlim[1] - xlim[0]
xlim += 0.02 * np.array([-sp, sp])
ax.set_xlim(xlim)
ylim = np.array(ax.get_ylim())
scale = 0.02
if log_scale:
sp = np.log(ylim[1] / ylim[0])
ylim = np.exp(np.array([np.log(ylim[0]) - scale * sp,
np.log(ylim[1]) + scale * sp]))
else:
sp = ylim[1] - ylim[0]
ylim += scale * np.array([-sp, sp])
ax.set_ylim(ylim)
ax.set_title('Scree Plot')
ax.set_ylabel('Eigenvalue')
ax.set_xlabel('Component Number')
fig.tight_layout()
return fig
def plot_rsquare(self, ncomp=None, ax=None):
"""
Box plots of the individual series R-square against the number of PCs
Parameters
----------
ncomp : int, optional
            Number of components to include in the plot. If None, will
            plot the minimum of 10 or the number of computed components
        ax : Matplotlib axes instance, optional
            An axes on which to draw the graph. If omitted, a new figure
is created
Returns
-------
fig : figure
Handle to the figure
"""
import statsmodels.graphics.utils as gutils
fig, ax = gutils.create_mpl_ax(ax)
ncomp = 10 if ncomp is None else ncomp
ncomp = min(ncomp, self._ncomp)
# R2s in rows, series in columns
r2s = 1.0 - self._ess_indiv / self._tss_indiv
r2s = r2s[1:]
r2s = r2s[:ncomp]
ax.boxplot(r2s.T)
ax.set_title('Individual Input $R^2$')
ax.set_ylabel('$R^2$')
ax.set_xlabel('Number of Included Principal Components')
return fig
def pca(data, ncomp=None, standardize=True, demean=True, normalize=True,
gls=False, weights=None, method='svd'):
"""
Principal Component Analysis
Parameters
----------
data : array
Variables in columns, observations in rows.
ncomp : int, optional
        Number of components to return. If None, returns as many as the
        smaller of the number of rows or columns of data.
standardize: bool, optional
Flag indicating to use standardized data with mean 0 and unit
variance. standardized being True implies demean.
demean : bool, optional
Flag indicating whether to demean data before computing principal
components. demean is ignored if standardize is True.
normalize : bool , optional
        Indicates whether to normalize the factors to have unit inner
product. If False, the loadings will have unit inner product.
weights : array, optional
Series weights to use after transforming data according to standardize
or demean when computing the principal components.
gls : bool, optional
Flag indicating to implement a two-step GLS estimator where
in the first step principal components are used to estimate residuals,
and then the inverse residual variance is used as a set of weights to
estimate the final principal components
method : str, optional
        Determines the linear algebra routine used. 'svd', the default,
        uses a singular value decomposition; 'eig' uses an eigenvalue
        decomposition.
Returns
-------
factors : array or DataFrame
        nobs by ncomp array of principal components (also known as scores)
loadings : array or DataFrame
ncomp by nvar array of principal component loadings for constructing
the factors
projection : array or DataFrame
        nobs by nvar array containing the projection of the data onto the ncomp
estimated factors
rsquare : array or Series
ncomp array where the element in the ith position is the R-square
        of including the first i principal components. The values are
calculated on the transformed data, not the original data.
ic : array or DataFrame
        ncomp by 3 array containing the Bai and Ng (2002) Information
criteria. Each column is a different criteria, and each row
represents the number of included factors.
eigenvals : array or Series
nvar array of eigenvalues
eigenvecs : array or DataFrame
nvar by nvar array of eigenvectors
Notes
-----
This is a simple function wrapper around the PCA class. See PCA for more information
and additional methods.
"""
pc = PCA(data, ncomp=ncomp, standardize=standardize, demean=demean,
normalize=normalize, gls=gls, weights=weights, method=method)
return (pc.factors, pc.loadings, pc.projection, pc.rsquare, pc.ic,
pc.eigenvals, pc.eigenvecs)
|
bsd-3-clause
|
justincassidy/scikit-learn
|
sklearn/cross_decomposition/pls_.py
|
187
|
28507
|
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
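# Quick sanity sketch (illustrative only, not part of the library API): per the
# docstring of _nipals_twoblocks_inner_loop, both helpers above recover the
# leading singular vectors of X'Y, so on well-conditioned data one expects,
# up to sign:
#   u_svd, v_svd = _svd_cross_product(X, Y)
#   u_nip, v_nip, _ = _nipals_twoblocks_inner_loop(X, Y, norm_y_weights=True)
#   np.allclose(np.abs(u_svd), np.abs(u_nip), atol=1e-4)   # expected True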
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; the constructor's
    parameters allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
    (i) The outer loop iterates over components.
    (ii) The inner loop estimates the weights vectors. This can be done
    with two algorithms: (a) the inner loop of the original NIPALS algorithm,
    or (b) an SVD on residuals cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value as True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of predictors.
        Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may be done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted from Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
# no fitted-check here: fit_transform must also work on a not-yet-fitted estimator
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
Whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
Tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value as True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimizes:
``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results as 3 PLS packages
available in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm" with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, (default True)
Whether to scale the data.
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value as True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression, but it is slightly different from CCA. This is mostly used
for modeling.
This implementation provides the same results as the "plspm" package
available in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply performs an SVD on the cross-covariance matrix X'Y.
There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
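Examples
--------
A minimal usage sketch (re-using the toy X and Y from the PLSRegression
example above; the fitted scores are not shown):
>>> import numpy as np
>>> from sklearn.cross_decomposition import PLSSVD
>>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
>>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
>>> plssvd = PLSSVD(n_components=2).fit(X, Y)
>>> X_scores, Y_scores = plssvd.transform(X, Y)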
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another solver. Otherwise,
# let's use arpack to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
|
bsd-3-clause
|
JohnOmernik/pyetl
|
pyetl.py
|
1
|
22127
|
#!/usr/bin/python
from confluent_kafka import Consumer, KafkaError
import json
import re
import time
import datetime
import shutil
import gzip
import os
import sys
import random
if sys.version_info > (3,):
try:
import pandas as pd
from fastparquet import write as parqwrite
from fastparquet import ParquetFile
except:
print("Attempting to import pands or fastparquet failed - Parquet writer WILL fail - are you sure your container has this support??")
else:
try:
from pychbase import Connection, Table, Batch
except:
print("Attempting to import pychbase failed - MaprDB writer WILL fail - ar you sure your contianer has this support??")
# Variables - Should be setable by arguments at some point
envvars = {}
# envvars['var'] = ['default', 'True/False Required', 'str/int']
# What type of destination will this instance be sending to
envvars['dest_type'] = ['', True, 'str'] # mapdb, parq, json
#Kafka/Streams
envvars['zookeepers'] = ['', False, 'str']
envvars['kafka_id'] = ['', False, 'str']
envvars['bootstrap_brokers'] = ['', False, 'str']
envvars['offset_reset'] = ['earliest', False, 'str']
envvars['group_id'] = ['', True, 'str']
envvars['topic'] = ['', True, 'str']
envvars['loop_timeout'] = ["5.0", False, 'flt']
# Field Creation - used as a basic way to create a field based on another field.
# If src is set and dest is not, the field is not created - Error occurs
# Example: src = ts, dst = ts_part, start = 0, end = 10. This would change a value like 2017-08-08T21:26:10.843768Z to 2017-08-08
envvars['derived_src'] = ['', False, 'str'] # The field to src
envvars['derived_dst'] = ['', False, 'str'] # The field to put in the dest
envvars['derived_start'] = [0, False, 'int'] # The position to start
envvars['derived_end'] = [0, False, 'int'] # The position to end
envvars['derived_req'] = [0, False, 'int'] # Fail if the addition/conversion fails
#Loop Control
envvars['rowmax'] = [50, False, 'int']
envvars['timemax'] = [60, False, 'int']
envvars['sizemax'] = [256000, False, 'int']
# Parquet Options
envvars['parq_offsets'] = [50000000, False, 'int']
envvars['parq_compress'] = ['SNAPPY', False, 'str']
envvars['parq_has_nulls'] = [1, False, 'bool']
envvars['parq_merge_file'] = [0, False, 'int']
# JSON Options
envvars['json_gz_compress'] = [0, False, 'bool'] # Not supported yet
# MapR-DB Options
envvars['maprdb_table_base'] = ['', True, 'str']
envvars['maprdb_row_key_fields'] = ['', True, 'str']
envvars['maprdb_row_key_delim'] = ['_', False, 'str']
envvars['maprdb_family_mapping'] = ['', True, 'str']
envvars['maprdb_create_table'] = [0, False, 'int']
envvars['maprdb_batch_enabled'] = [0, False, 'int']
envvars['maprdb_print_drill_view'] = [0, False, 'int']
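# A hypothetical MAPRDB_FAMILY_MAPPING value (the family and column names below
# are made-up examples, shown only to illustrate the format parsed by
# loadmaprdbschemas(): families are separated by ';', a family and its columns
# by ':', and column names by ','):
#   MAPRDB_FAMILY_MAPPING="meta:ts,host;data:src_ip,dst_ip"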
#File Options
envvars['file_maxsize'] = [8000000, False, 'int']
envvars['file_uniq_env'] = ['HOSTNAME', False, 'str']
envvars['file_partition_field'] = ['day', False, 'str']
envvars['file_partmaxage'] = ['600', False, 'int']
envvars['file_unknownpart'] = ['unknown', False, 'str']
envvars['file_table_base'] = ['', True, 'str']
envvars['file_tmp_part_dir'] = ['.tmp', False, 'str']
envvars['file_write_live'] = [0, False, 'int']
# Bad Data Management
envvars['remove_fields_on_fail'] = [0, False, 'int'] # If Json fails to import, should we try to remove_fields based on 'REMOVE_FIELDS'
envvars['remove_fields'] = ['', False, 'str'] # Comma Sep list of fields to try to remove if failure on JSON import
# Debug
envvars['debug'] = [0, False, 'int']
#envvars['drop_req_body_on_error'] = [1, False, 'int']
loadedenv = {}
def main():
table_schema = {}
cf_schema = {}
cf_lookup = {}
table = None
global loadedenv
loadedenv = loadenv(envvars)
if loadedenv['dest_type'] != 'maprdb':
loadedenv['tmp_part'] = loadedenv['file_table_base'] + "/" + loadedenv['file_tmp_part_dir']
loadedenv['uniq_val'] = os.environ[loadedenv['file_uniq_env']]
if loadedenv['debug'] == 1:
print(json.dumps(loadedenv, sort_keys=True, indent=4, separators=(',', ': ')))
if loadedenv['derived_src'] != '' and loadedenv['derived_dst'] == '':
print("If adding a field, you must have a field name")
print("derived_src %s - derived_dst: %s" % (loadedenv['derived_src'], loadedenv['derived_dst']))
sys.exit(1)
if loadedenv['dest_type'] == 'parq':
if not sys.version_info > (3,):
print("Python 2 is not supported for Parquet Writer, please use Python 3")
sys.exit(1)
elif loadedenv['dest_type'] == 'maprdb':
if sys.version_info > (3,):
print("Python 3 is not supported for maprdb load please use Python 2")
sys.exit(1)
table_schema, cf_schema, cf_lookup = loadmaprdbschemas()
myview = drill_view(table_schema)
if loadedenv['debug'] >= 1 or loadedenv['maprdb_print_drill_view'] == 1:
print("Drill Shell View:")
print( myview)
if loadedenv['maprdb_print_drill_view'] == 1:
sys.exit(0)
if loadedenv['debug'] >= 1:
print("Schema provided:")
print(table_schema)
print("")
print("cf_lookip:")
print(cf_lookup)
connection = Connection()
try:
table = connection.table(loadedenv['maprdb_table_base'])
except:
if loadedenv['maprdb_create_table'] != 1:
print("Table not found and create table not set to 1 - Cannot proceed")
sys.exit(1)
else:
print("Table not found: Creating")
connection.create_table(loadedenv['maprdb_table_base'], cf_schema)
try:
table = connection.table(loadedenv['maprdb_table_base'])
except:
print("Couldn't find table, tried to create, still can't find, exiting")
sys.exit(1)
if not loadedenv['dest_type'] == 'maprdb':
if not os.path.isdir(loadedenv['tmp_part']):
os.makedirs(loadedenv['tmp_part'])
curfile = loadedenv['uniq_val'] + "_curfile." + loadedenv['dest_type']
# Get the Bootstrap brokers if it doesn't exist
if loadedenv['bootstrap_brokers'] == "":
if loadedenv['zookeepers'] == "":
print("Must specify either Bootstrap servers via BOOTSTRAP_BROKERS or Zookeepers via ZOOKEEPERS")
sys.exit(1)
mybs = bootstrap_from_zk(loadedenv['zookeepers'], loadedenv['kafka_id'])
else:
if loadedenv['bootstrap_brokers'] == 'mapr':
mybs = ''
else:
# otherwise use the explicitly provided bootstrap broker list
mybs = loadedenv['bootstrap_brokers']
if loadedenv['debug'] >= 1:
print (mybs)
# Create Consumer group to listen on the topic specified
c = Consumer({'bootstrap.servers': mybs, 'group.id': loadedenv['group_id'], 'default.topic.config': {'auto.offset.reset': loadedenv['offset_reset']}})
c.subscribe([loadedenv['topic']], on_assign=print_assignment)
# Initialize counters
rowcnt = 0
sizecnt = 0
lastwrite = int(time.time()) - 1
dataar = []
part_ledger = {}
# Listen for messages
running = True
while running:
curtime = int(time.time())
timedelta = curtime - lastwrite
try:
message = c.poll(timeout=loadedenv['loop_timeout'])
except KeyboardInterrupt:
print("\n\nExiting per User Request")
c.close()
sys.exit(0)
if message is None:
# No message was found but we still want to check our stuff
pass
elif not message.error():
rowcnt += 1
jmsg, errcode = returnJSONRecord(message)
if errcode == 0:
sizecnt += len(json.dumps(jmsg))
dataar.append(jmsg)
elif message.error().code() != KafkaError._PARTITION_EOF:
print("MyError: " + message.error())
running = False
break
# If our row count is over the max, our size is over the max, or time delta is over the max, write the group.
if (rowcnt >= loadedenv['rowmax'] or timedelta >= loadedenv['timemax'] or sizecnt >= loadedenv['sizemax']) and len(dataar) > 0:
if loadedenv['dest_type'] != 'maprdb':
part_ledger = writeFile(dataar, part_ledger, curfile, curtime, rowcnt, sizecnt, timedelta)
part_ledger = dumpPart(part_ledger, curtime)
else:
writeMapRDB(dataar, table, cf_lookup, rowcnt, sizecnt, timedelta)
rowcnt = 0
sizecnt = 0
lastwrite = curtime
dataar = []
c.close()
def writeMapRDB(dataar, table, cf_lookup, rowcnt, sizecnt, timedelta):
if loadedenv['maprdb_batch_enabled'] == 1:
batch = table.batch()
for r in dataar:
batch.put(db_rowkey(r), db_row(r, cf_lookup))
batch_errors = batch.send()
if batch_errors == 0:
if loadedenv['debug'] >= 1:
print("%s Write batch to %s at %s records - Size: %s - Seconds since last write: %s - NO ERRORS" % (datetime.datetime.now(), loadedenv['maprdb_table_base'], rowcnt, sizecnt, timedelta))
else:
print("Multiple errors on write - Errors: %s" % batch_errors)
sys.exit(1)
else:
bcnt = 0
for r in dataar:
bcnt += 1
try:
table.put(db_rowkey(r), db_row(r, cf_lookup))
except:
print("Failed on record with key: %s" % db_rowkey(r))
print(db_row(r, cf_lookup))
sys.exit(1)
if loadedenv['debug'] >= 1:
print("Pushed: %s rows" % rowcnt)
def dumpPart(pledger, curtime):
removekeys = []
for x in pledger.keys():
l = pledger[x][0]
s = pledger[x][1]
f = pledger[x][2]
fw = pledger[x][3]
base_dir = loadedenv['file_table_base'] + '/' + x
if not os.path.isdir(base_dir):
try:
os.makedirs(base_dir)
except:
print("Partition Create failed, it may have been already created for %s" % (base_dir))
if s > loadedenv['file_maxsize'] or (curtime - fw) > loadedenv['file_partmaxage']:
new_file_name = loadedenv['uniq_val'] + "_" + str(curtime) + "." + loadedenv['dest_type']
new_file = base_dir + "/" + new_file_name
if loadedenv['debug'] >= 1:
outreason = ""
if s > loadedenv['file_maxsize']:
outreason = "Max Size"
else:
outreason = "Max Age"
print("%s %s reached - Size: %s - Age: %s - Writing to %s" % (datetime.datetime.now(), outreason, s, curtime - l, new_file))
if loadedenv['dest_type'] == 'json':
if loadedenv['json_gz_compress'] == 1:
if loadedenv['debug'] >= 1:
print("Compressing json files")
with open(f, 'rb') as f_in:
with gzip.open(f + ".gz", 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(f)
f = f + ".gz"
new_file = new_file + ".gz"
shutil.move(f, new_file)
removekeys.append(x)
# If merge_file is 1 then we read in the whole parquet file and output it in one go to eliminate all the row groups from appending
if loadedenv['dest_type'] == 'parq':
if loadedenv['parq_merge_file'] == 1:
if loadedenv['debug'] >= 1:
print("%s Merging parqfile into to new parq file" % (datetime.datetime.now()))
inparq = ParquetFile(new_file)
inparqdf = inparq.to_pandas()
tmp_file = loadedenv['tmp_part'] + "/" + new_file_name
parqwrite(tmp_file, inparqdf, compression=loadedenv['parq_compress'], row_group_offsets=loadedenv['parq_offsets'], has_nulls=loadedenv['parq_has_nulls'])
shutil.move(tmp_file, new_file)
inparq = None
inparqdf = None
for y in removekeys:
del pledger[y]
return pledger
def writeFile(dataar, pledger, curfile, curtime, rowcnt, sizecnt, timedelta):
parts = []
if loadedenv['dest_type'] == 'parq':
parqdf = pd.DataFrame.from_records([l for l in dataar])
parts = parqdf[loadedenv['file_partition_field']].unique().tolist()  # tolist() so the default partition can be appended below
if len(parts) == 0:
print("Error: Records without Partition field - Using default Partition of %s" % loadedenv['file_unknownpart']) # Need to do better job here
parts.append(loadedenv['file_unknownpart'])
else:
parts = []
for x in dataar:
try:
p = x[loadedenv['file_partition_field']]
except:
p = loadedenv['file_unknownpart']
if not p in parts:
parts.append(p)
if loadedenv['debug'] >= 1:
print("%s Write Data batch to %s at %s records - Size: %s - Seconds since last write: %s - Partitions in this batch: %s" % (datetime.datetime.now(), curfile, rowcnt, sizecnt, timedelta, parts))
for part in parts:
if loadedenv['dest_type'] == 'parq':
partdf = parqdf[parqdf[loadedenv['file_partition_field']] == part]
else:
partar = []
for x in dataar:
try:
curpart = x[loadedenv['file_partition_field']]
except:
curpart = loadedenv['file_unknownpart']
if curpart == part:
partar.append(x)
if loadedenv['file_write_live'] == 1:
base_dir = loadedenv['file_table_base'] + "/" + part
else:
base_dir = loadedenv['file_table_base'] + "/" + loadedenv['file_tmp_part_dir'] + "/" + part
final_file = base_dir + "/" + curfile
if not os.path.isdir(base_dir):
try:
os.makedirs(base_dir)
except:
print("Partition Create failed, it may have been already created for %s" % (base_dir))
if loadedenv['debug'] >= 1:
print("----- Writing partition %s to %s" % (part, final_file))
if loadedenv['dest_type'] == 'parq':
if not os.path.exists(final_file):
parqwrite(final_file, partdf, compression=loadedenv['parq_compress'], row_group_offsets=loadedenv['parq_offsets'], has_nulls=loadedenv['parq_has_nulls'])
else:
parqwrite(final_file, partdf, compression=loadedenv['parq_compress'], row_group_offsets=loadedenv['parq_offsets'], has_nulls=loadedenv['parq_has_nulls'], append=True)
partdf = pd.DataFrame()
else:
fout = open(final_file, 'a')
for x in partar:
fout.write(json.dumps(x) + "\n")
fout.close()
partar = []
cursize = os.path.getsize(final_file)
if part in pledger:
firstwrite = pledger[part][3]
else:
firstwrite = curtime
ledger = [curtime, cursize, final_file, firstwrite]
pledger[part] = ledger
return pledger
# Take a Kafka Message object (m) and try to make a json record out of it
def returnJSONRecord(m):
retval = {}
failedjson = 0
# This is a message; let's add it to our queue
try:
# This may not be the best way to approach this.
retval = m.value().decode('ascii', errors='replace')
except:
print(m.value())
failedjson = 3
# Only write if we have a message
if retval != "" and failedjson == 0:
try:
retval = json.loads(retval)
except:
failedjson = 1
if loadedenv['remove_fields_on_fail'] == 1:
print("%s JSON Error likely due to binary in request - per config remove_field_on_fail - we are removing the the following fields and trying again" % (datetime.datetime.now()))
while failedjson == 1:
repval = m.value()
for f in loadedenv['remove_fields'].split(","):
print("Trying to remove: %s" % f)
if sys.version_info > (3,):
repval = re.sub(b'"' + f.encode() + b'":".+?","', b'"' + f.encode() + b'":"","', repval)
else:
repval = re.sub('"' + f + '":".+?","', '"' + f + '":"","', repval)
try:
retval = json.loads(repval.decode("ascii", errors='ignore'))
failedjson = 0
break
except:
print("Still could not force into json even after dropping %s" % f)
if failedjson == 1:
if loadedenv['debug'] == 1:
print(repval.decode("ascii", errors='ignore'))
failedjson = 2
if loadedenv['debug'] >= 1 and failedjson >= 1:
printJSONFail(m, retval)
if loadedenv['derived_src'] != "":
try:
s = loadedenv['derived_start']
e = loadedenv['derived_end']
srcval = retval[loadedenv['derived_src']]
if e != 0:
retval[loadedenv['derived_dst']] = srcval[s:e]
else:
retval[loadedenv['derived_dst']] = srcval[s:]
except:
print("Error converting field %s" % loadedenv['derived_src'])
if loadedenv['derived_req'] != 0:
print("Exiting due to derived_req being set")
sys.exit(1)
return retval, failedjson
def printJSONFail(m, val):
print ("JSON Error - Debug - Attempting to print")
print("Raw form kafka:")
try:
print(m.value())
except:
print("Raw message failed to print")
print("Ascii Decoded (Sent to json.dumps):")
try:
print(val)
except:
print("Ascii dump message failed to print")
def drill_view(tbl):
tbl_name = loadedenv['maprdb_table_base']
out = "CREATE OR REPLACE VIEW MYVIEW_OF_DATA as \n"
out = out + "select \n"
out = out + "CONVERT_FROM(`row_key`, 'UTF8') AS `tbl_row_key`,\n"
for cf in tbl.iterkeys():
for c in tbl[cf]:
out = out + "CONVERT_FROM(t.`%s`.`%s`, 'UTF8') as `%s`, \n" % (cf, c, c)
out = out[:-3] + "\n"
out = out + "FROM `%s` t\n" % tbl_name
return out
def db_rowkey(jrow):
out = ""
for x in loadedenv['maprdb_row_key_fields'].split(","):
v = ''
# When we don't have a lot of variance in our key generation, we can add a RANDOMROWKEYVAL to the row key
if x == "RANDOMROWKEYVAL":
v = str(random.randint(1,100000000))
else:
if jrow[x] == None:
v = ''
else:
try:
v = str(jrow[x])
except:
print(jrow)
sys.exit(1)
if out == "":
out = v
else:
out = out + loadedenv['maprdb_row_key_delim'] + v
return out
def db_row(jrow, cfl):
out ={}
for r in jrow:
v = ''
if jrow[r] == None:
v = ''
else:
try:
v = str(jrow[r])
except:
try:
v = jrow[r].encode('ascii', errors='ignore').decode()
except:
print("Field: %s" % r)
print(jrow)
out[cfl[r] + ":" + r] = v
return out
def loadmaprdbschemas():
table_schema = {}
cf_schema = {}
cf_lookup = {}
for x in loadedenv['maprdb_family_mapping'].split(";"):
o = x.split(":")
table_schema[o[0]] = o[1].split(",")
cf_schema[o[0]] = {}
for x in table_schema.iterkeys():
for c in table_schema[x]:
cf_lookup[c] = x
return table_schema, cf_schema, cf_lookup
def loadenv(evars):
print("Loading Environment Variables")
lenv = {}
val = None
for e in evars:
try:
val = os.environ[e.upper()]
except:
if evars[e][1] == True:
if e == 'dest_type':
print("Variables DEST_TYPE not found, this variable MUST be provided - exiting")
sys.exit(1)
val = None
else:
print("ENV Variable %s not found, but not required, using default of '%s'" % (e.upper(), evars[e][0]))
val = evars[e][0]
if evars[e][2] == 'int':
val = int(val)
if evars[e][2] == 'flt':
val = float(val)
if evars[e][2] == 'bool':
val = bool(val)
if val != None:
lenv[e] = val
d = lenv['dest_type']
if d != "maprdb":
other = "file"
else:
other = "notfile"
for e in evars:
if evars[e][1] == True:
if not e in lenv and e.find(d) != 0 and e.find(other) != 0:
print("ENV Variable %s is required and not provided - Exiting" % (e.upper()))
sys.exit(1)
return lenv
# Get our bootstrap string from zookeepers if provided
def bootstrap_from_zk(ZKs, kafka_id):
from kazoo.client import KazooClient
zk = KazooClient(hosts=ZKs,read_only=True)
zk.start()
brokers = zk.get_children('/%s/brokers/ids' % kafka_id)
BSs = ""
for x in brokers:
res = zk.get('/%s/brokers/ids/%s' % (kafka_id, x))
dj = json.loads(res[0].decode('utf-8'))
srv = "%s:%s" % (dj['host'], dj['port'])
if BSs == "":
BSs = srv
else:
BSs = BSs + "," + srv
zk.stop()
zk = None
return BSs
def print_assignment(consumer, partitions):
if loadedenv['debug'] >= 1:
print('Assignment of group to partitions %s' % partitions)
if __name__ == "__main__":
main()
|
apache-2.0
|
dwhswenson/mdtraj
|
mdtraj/nmr/shift_wrappers.py
|
3
|
12252
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
from __future__ import print_function, absolute_import
import os
import subprocess
from distutils.spawn import find_executable as _find_executable
from distutils.version import LooseVersion
import numpy as np
from mdtraj.utils import enter_temp_directory, import_
##############################################################################
# Globals
##############################################################################
# Possible names for the external commands -- these are expected
# to be found in the PATH.
SHIFTX2 = ['shiftx2.py']
SPARTA_PLUS = ['sparta+', 'SPARTA+', 'SPARTA+.linux']
PPM = ['ppm_linux_64.exe']
__all__ = ['chemical_shifts_shiftx2', 'chemical_shifts_ppm', 'chemical_shifts_spartaplus', "reindex_dataframe_by_atoms"]
def find_executable(names):
for possible in names:
result = _find_executable(possible)
if result is not None:
return result
return None
##############################################################################
# Code
##############################################################################
def compute_chemical_shifts(trj, model="shiftx2", **kwargs):
"""Predict chemical shifts of a trajectory using ShiftX2.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
model : str, optional, default="shiftx2"
The program to use for calculating chemical shifts. Must be one
of shiftx2, ppm, or sparta+
Returns
-------
results : pandas DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
You must have the appropriate chemical shift prediction programs installed
and in your executable path.
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference; see the docstrings of the chemical_shifts_*
functions for the various possible models.
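Examples
--------
A minimal, hypothetical sketch (assumes the chosen predictor is installed
and on the PATH, and that the file name below is your own structure):
>>> import mdtraj as md
>>> trj = md.load('frame0.pdb')  # doctest: +SKIP
>>> shifts = compute_chemical_shifts(trj, model="shiftx2")  # doctest: +SKIP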
"""
if model == "shiftx2":
return chemical_shifts_shiftx2(trj, **kwargs)
elif model == "ppm":
return chemical_shifts_ppm(trj, **kwargs)
elif model == "sparta+":
return chemical_shifts_spartaplus(trj, **kwargs)
else:
raise(ValueError("model must be one of shiftx2, ppm, or sparta+"))
def chemical_shifts_shiftx2(trj, pH=5.0, temperature=298.00):
"""Predict chemical shifts of a trajectory using ShiftX2.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
pH : float, optional, default=5.0
pH value which gets passed to the ShiftX2 predictor.
temperature : float, optional, default=298.00
Temperature which gets passed to the ShiftX2 predictor.
Returns
-------
results : pandas DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
You must have ShiftX2 available on your path; see (http://www.shiftx2.ca/).
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference below.
References
----------
.. [1] Beomsoo Han, Yifeng Liu, Simon Ginzinger, and David Wishart.
"SHIFTX2: significantly improved protein chemical shift
prediction." J. Biomol. NMR, 50, 1 43-57 (2011)
"""
pd = import_('pandas')
binary = find_executable(SHIFTX2)
if binary is None:
raise OSError('External command not found. Looked for {} in PATH. '
'`chemical_shifts_shiftx2` requires the external program SHIFTX2, '
'available at http://www.shiftx2.ca/'.format(', '.join(SHIFTX2)))
results = []
with enter_temp_directory():
for i in range(trj.n_frames):
fn = './trj%d.pdb' % i
trj[i].save(fn)
subprocess.check_call([binary,
'-b', fn,
'-p', "{:.1f}".format(pH),
'-t', "{:.2f}".format(temperature),
])
d = pd.read_csv("./trj%d.pdb.cs" % i)
d.rename(columns={"NUM": "resSeq", "RES": "resName", "ATOMNAME": "name"}, inplace=True)
d["frame"] = i
results.append(d)
results = pd.concat(results)
if LooseVersion(pd.__version__) < LooseVersion('0.14.0'):
results = results.pivot_table(rows=["resSeq", "name"], cols="frame", values="SHIFT")
else:
results = results.pivot_table(index=["resSeq", "name"], columns="frame", values="SHIFT")
return results
def chemical_shifts_ppm(trj):
"""Predict chemical shifts of a trajectory using ppm.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
Returns
-------
results : pandas.DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
You must have ppm available on your path; see
(http://spin.ccic.ohio-state.edu/index.php/download/index).
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference below.
References
----------
.. [1] Li, DW, and Bruschweiler, R. "PPM: a side-chain and backbone chemical
shift predictor for the assessment of protein conformational ensembles."
J Biomol NMR. 2012 Nov;54(3):257-65.
"""
pd = import_('pandas')
binary = find_executable(PPM)
first_resSeq = trj.top.residue(0).resSeq
if binary is None:
raise OSError('External command not found. Looked for %s in PATH. `chemical_shifts_ppm` requires the external program PPM, available at http://spin.ccic.ohio-state.edu/index.php/download/index' % ', '.join(PPM))
with enter_temp_directory():
trj.save("./trj.pdb")
# -para old is in order to use newer ppm versions
cmd = "%s -pdb trj.pdb -mode detail -para old" % binary
return_flag = os.system(cmd)
if return_flag != 0:
raise(IOError("Could not successfully execute command '%s', check your PPM installation or your input trajectory." % cmd))
d = pd.read_table("./bb_details.dat", index_col=False, header=None, sep="\s+").drop([3], axis=1)
d = d.rename(columns={0: "resSeq", 1: "resName", 2: "name"})
d["resSeq"] += first_resSeq - 1 # Fix bug in PPM that reindexes to 1
d = d.drop("resName", axis=1)
d = d.set_index(["resSeq", "name"])
d.columns = np.arange(trj.n_frames)
d.columns.name = "frame"
return d
def _get_lines_to_skip(filename):
"""Determine the number of comment lines in a SPARTA+ output file."""
format_string = """FORMAT %4d %4s %4s %9.3f %9.3f %9.3f %9.3f %9.3f %9.3f"""
handle = open(filename)
for i, line in enumerate(handle):
if line.find(format_string) != -1:
return i + 2
raise(Exception("No format string found in SPARTA+ file!"))
def chemical_shifts_spartaplus(trj, rename_HN=True):
"""Predict chemical shifts of a trajectory using SPARTA+.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
rename_HN : bool, optional, default=True
SPARTA+ calls the amide proton "HN" instead of the standard "H".
When True, this option renames the output as "H" to match the PDB
and BMRB nomenclature.
Returns
-------
results : pandas.DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
You must have SPARTA+ available on your path; see
(http://spin.niddk.nih.gov/bax/software/SPARTA+/). Also, the SPARTAP_DIR
environment variable must be set so that SPARTA+ knows where to find
its database files.
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference below.
References
----------
.. [1] Shen, Y., and Bax, Ad. "SPARTA+: a modest improvement in empirical
NMR chemical shift prediction by means of an artificial neural network."
J. Biomol. NMR, 48, 13-22 (2010)
"""
pd = import_('pandas')
binary = find_executable(SPARTA_PLUS)
if binary is None:
raise OSError('External command not found. Looked for %s in PATH. `chemical_shifts_spartaplus` requires the external program SPARTA+, available at http://spin.niddk.nih.gov/bax/software/SPARTA+/' % ', '.join(SPARTA_PLUS))
names = ["resSeq", "resName", "name", "SS_SHIFT", "SHIFT", "RC_SHIFT", "HM_SHIFT", "EF_SHIFT", "SIGMA"]
with enter_temp_directory():
for i in range(trj.n_frames):
trj[i].save("./trj%d.pdb" % i)
subprocess.check_call([binary, '-in'] + ["trj{}.pdb".format(i) for i in range(trj.n_frames)]
+ ['-out', 'trj0_pred.tab'])
lines_to_skip = _get_lines_to_skip("trj0_pred.tab")
results = []
for i in range(trj.n_frames):
d = pd.read_table("./trj%d_pred.tab" % i, names=names, header=None, sep="\s+", skiprows=lines_to_skip)
d["frame"] = i
results.append(d)
results = pd.concat(results)
if rename_HN:
results.name[results.name == "HN"] = "H"
if LooseVersion(pd.__version__) < LooseVersion('0.14.0'):
results = results.pivot_table(rows=["resSeq", "name"], cols="frame", values="SHIFT")
else:
results = results.pivot_table(index=["resSeq", "name"], columns="frame", values="SHIFT")
return results
def reindex_dataframe_by_atoms(trj, frame):
"""Reindex chemical shift output to use atom number (serial) indexing.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
frame : pandas.DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Returns
-------
new_frame : pandas.DataFrame
Dataframe containing results, with index consisting of atom
indices (AKA the 'serial' entry in a PDB). Columns correspond to
each frame in trj.
Notes
-----
Be aware that this function may DROP predictions if the atom naming
is different between the input trajectory and the output of various
chemical shift prediction tools.
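Examples
--------
A minimal, hypothetical sketch (assumes ``trj`` is a loaded protein
Trajectory and that SHIFTX2 is available on your PATH):
>>> shifts = chemical_shifts_shiftx2(trj)  # doctest: +SKIP
>>> by_atom = reindex_dataframe_by_atoms(trj, shifts)  # doctest: +SKIP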
"""
top, bonds = trj.top.to_dataframe()
top["serial"] = top.index
top = top.set_index(["resSeq", "name"])
new_frame = frame.copy()
new_frame["serial"] = top.ix[new_frame.index].serial
new_frame = new_frame.dropna().reset_index().set_index("serial").drop(["resSeq", "name"], axis=1)
return new_frame
|
lgpl-2.1
|
weidel-p/nest-simulator
|
pynest/examples/sinusoidal_poisson_generator.py
|
5
|
5526
|
# -*- coding: utf-8 -*-
#
# sinusoidal_poisson_generator.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
"""
Sinusoidal Poisson generator example
------------------------------------
This script demonstrates the use of the ``sinusoidal_poisson_generator``
and its different parameters and modes. The source code of the model
can be found in ``models/sinusoidal_poisson_generator.h``.
The script is structured into two parts and creates one common figure.
In Part 1, two instances of the ``sinusoidal_poisson_generator`` are
created with different parameters. Part 2 illustrates the effect of
the ``individual_spike_trains`` switch.
"""
###############################################################################
# We import the modules required to simulate, analyze and plot this example.
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel() # in case we run the script multiple times from IPython
####################################################################################
# We create two instances of the ``sinusoidal_poisson_generator`` with two
# different parameter sets using ``Create``. Moreover, we create devices to
# record firing rates (``multimeter``) and spikes (``spike_detector``) and connect
# them to the generators using ``Connect``.
nest.SetKernelStatus({'resolution': 0.01})
g = nest.Create('sinusoidal_poisson_generator', n=2,
params=[{'rate': 10000.0,
'amplitude': 5000.0,
'frequency': 10.0,
'phase': 0.0},
{'rate': 0.0,
'amplitude': 10000.0,
'frequency': 5.0,
'phase': 90.0}])
m = nest.Create('multimeter', 2, {'interval': 0.1, 'record_from': ['rate']})
s = nest.Create('spike_detector', 2)
nest.Connect(m, g, 'one_to_one')
nest.Connect(g, s, 'one_to_one')
print(m.get())
nest.Simulate(200)
###############################################################################
# After simulating, the spikes are extracted from the ``spike_detector`` using
# ``GetStatus`` and plots are created with panels for the PST and ISI histograms.
colors = ['b', 'g']
for j in range(2):
ev = m[j].events
t = ev['times']
r = ev['rate']
sp = nest.GetStatus(s[j])[0]['events']['times']
plt.subplot(221)
h, e = np.histogram(sp, bins=np.arange(0., 201., 5.))
plt.plot(t, r, color=colors[j])
plt.step(e[:-1], h * 1000 / 5., color=colors[j], where='post')
plt.title('PST histogram and firing rates')
plt.ylabel('Spikes per second')
plt.subplot(223)
plt.hist(np.diff(sp), bins=np.arange(0., 1.005, 0.02),
histtype='step', color=colors[j])
plt.title('ISI histogram')
###############################################################################
# The kernel is reset and the number of threads set to 4.
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
###############################################################################
# A ``sinusoidal_poisson_generator`` with ``individual_spike_trains`` set to
# `True` is created and connected to 20 parrot neurons whose spikes are
# recorded by a ``spike_detector``. After simulating, a raster plot of the spikes
# is created.
g = nest.Create('sinusoidal_poisson_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0,
'individual_spike_trains': True})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_detector')
nest.Connect(g, p, 'all_to_all')
nest.Connect(p, s, 'all_to_all')
nest.Simulate(200)
ev = s.events
plt.subplot(222)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('Individual spike trains for each target')
###############################################################################
# The kernel is reset again and the whole procedure is repeated for a
# ``sinusoidal_poisson_generator`` with `individual_spike_trains` set to
# `False`. The plot shows that in this case, all neurons receive the same
# spike train from the ``sinusoidal_poisson_generator``.
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
g = nest.Create('sinusoidal_poisson_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0,
'individual_spike_trains': False})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_detector')
nest.Connect(g, p, 'all_to_all')
nest.Connect(p, s, 'all_to_all')
nest.Simulate(200)
ev = s.events
plt.subplot(224)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('One spike train for all targets')
plt.show()
|
gpl-2.0
|
perryjohnson/biplaneblade
|
sandia_blade_lib/layer_plane_angles_stn05.py
|
1
|
4751
|
"""Determine the layer plane angle of all the elements in a grid.
Author: Perry Roth-Johnson
Last modified: March 18, 2014
References:
http://stackoverflow.com/questions/3365171/calculating-the-angle-between-two-lines-without-having-to-calculate-the-slope/3366569#3366569
http://stackoverflow.com/questions/19295725/angle-less-than-180-between-two-segments-lines
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import lib.grid as gr
reload(gr)
import lib.abaqus_utils2 as au
reload(au)
import lib.vabs_utils as vu
reload(vu)
from shapely.geometry import Polygon, LineString
from descartes import PolygonPatch
# -----------------------------------------------
# update these parameters!
station_num = 5
# -----------------------------------------------
stn_str = 'stn{0:02d}'.format(station_num)
plt.close('all')
# create a figure
plt.figure(num='Station #{0:02d}'.format(station_num))
ax = plt.gcf().gca()
# element sets on the leading edge
# outer_edge_node_nums=[1,4], inner_edge_node_nums=[2,3]
list_of_LE_elementsets = [
'rbtrile',
'esgelle',
'estrile',
'isresle',
'istrile'
]
# element sets on the trailing edge
# outer_edge_node_nums=[3,2], inner_edge_node_nums=[4,1]
list_of_TE_elementsets = [
'teuniax',
'rbtrite',
'esgelte',
'estrite',
'isreste',
'istrite'
]
# element sets on the lower surface
# outer_edge_node_nums=[2,1], inner_edge_node_nums=[3,4]
list_of_lower_elementsets = [
'esgellr',
'estrilr',
'isreslr',
'istrilr',
'rbtrilr',
'rbtriscl',
'esgelscl',
'estriscl',
'isresscl',
'istriscl',
'sclower'
]
# element sets on the upper surface
# outer_edge_node_nums=[4,3], inner_edge_node_nums=[1,2]
list_of_upper_elementsets = [
'esgelur',
'estriur',
'isresur',
'istriur',
'rbtriur',
'rbtriscu',
'esgelscu',
'estriscu',
'isresscu',
'istriscu',
'scupper'
]
# import the initial grid object
fmt_grid = 'sandia_blade/' + stn_str + '/mesh_' + stn_str + '.abq'
g = au.AbaqusGrid(fmt_grid, debug_flag=True)
# update the grid object with all the layer plane angles
for elem in g.list_of_elements:
if elem.element_set in list_of_LE_elementsets:
elem.calculate_layer_plane_angle(outer_edge_node_nums=[1,4],
inner_edge_node_nums=[2,3])
elif elem.element_set in list_of_TE_elementsets:
elem.calculate_layer_plane_angle(outer_edge_node_nums=[3,2],
inner_edge_node_nums=[4,1])
elif elem.element_set in list_of_lower_elementsets:
elem.calculate_layer_plane_angle(outer_edge_node_nums=[2,1],
inner_edge_node_nums=[3,4])
elif elem.element_set in list_of_upper_elementsets:
elem.calculate_layer_plane_angle(outer_edge_node_nums=[4,3],
inner_edge_node_nums=[1,2])
else:
raise Warning("Element #{0} has no element set!".format(elem.elem_num))
# plot a small selection of elements to check the results
for elem in g.list_of_elements[::25]:
# for elem in g.list_of_elements[:150:5]:
elem.plot(label_nodes=False)
print elem.elem_num, elem.element_set, elem.theta1
# show the plot
plt.xlim([-3,3])
plt.ylim([-3,3])
ax.set_aspect('equal')
print ' ------------------------'
print ' LEGEND'
print ' magenta : inner edge'
print ' blue : outer edge'
print ' ------------------------'
# plt.figure(num='Station #{0:02d}, theta1 vs. elem_num'.format(
# station_num))
# enum=np.arange(g.number_of_elements)+1
# theta=np.zeros(g.number_of_elements)
# elemset=[]
# for i,elem in enumerate(g.list_of_elements):
# theta[i] = elem.theta1
# elemset.append(elem.element_set)
# plt.plot(enum,theta)
# plt.xlabel('element number [#]')
# plt.ylabel('theta1 [deg]')
# plt.grid('on')
plt.show()
# -----------------------------------------------------------------------------
# read layers.csv to determine the number of layers
layer_file = pd.read_csv('sandia_blade/layers.csv', index_col=0)
number_of_layers = len(layer_file)
# write the updated grid object to a VABS input file
fmt_vabs = 'sandia_blade/' + stn_str + '/mesh_' + stn_str + '.vabs'
f = vu.VabsInputFile(
vabs_filename=fmt_vabs,
grid=g,
material_filename='sandia_blade/materials.csv',
layer_filename='sandia_blade/layers.csv',
debug_flag=True,
flags={
'format' : 1,
'Timoshenko' : 1,
'recover' : 0,
'thermal' : 0,
'curve' : 0,
'oblique' : 0,
'trapeze' : 0,
'Vlasov' : 0
})
|
gpl-3.0
|
mmilutinovic1313/zipline-with-algorithms
|
zipline/protocol.py
|
3
|
17081
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from six import iteritems, iterkeys
import pandas as pd
import numpy as np
from . utils.protocol_utils import Enum
from . utils.math_utils import nanstd, nanmean, nansum
from zipline.finance.trading import with_environment
from zipline.utils.algo_instance import get_algo_instance
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = Enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION',
'CLOSE_POSITION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
'declared_date',
'ex_date',
'gross_amount',
'net_amount',
'pay_date',
'payment_sid',
'ratio',
'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = [
'id',
'payment_sid',
'cash_amount',
'share_count',
]
def dividend_payment(data=None):
"""
Take a dictionary whose values are in DIVIDEND_PAYMENT_FIELDS and return a
series representing the payment of a dividend.
Ids are assigned to each historical dividend in
PerformanceTracker.update_dividends. They are guaranteed to be unique
integers within the context of a single simulation. If @data is non-empty, an
id is required to identify the historical dividend associated with this
payment.
Additionally, if @data is non-empty, either data['cash_amount'] should be
nonzero or data['payment_sid'] should be an asset identifier and
data['share_count'] should be nonzero.
The returned Series is given its id value as a name so that concatenating
payments results in a DataFrame indexed by id. (Note, however, that the
name value is not used to construct an index when this series is returned
by function passed to `DataFrame.apply`. In such a case, pandas preserves
the index of the DataFrame on which `apply` is being called.)
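A minimal, hypothetical sketch (the id, sid, and amounts below are made up
for illustration only):
>>> payment = dividend_payment({
... 'id': 1,
... 'payment_sid': 24,
... 'cash_amount': 100.0,
... 'share_count': 0,
... })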
"""
return pd.Series(
data=data,
name=data['id'] if data is not None else None,
index=DIVIDEND_PAYMENT_FIELDS,
dtype=object,
)
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__ = initial_values
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
class Order(Event):
pass
class Portfolio(object):
def __init__(self):
self.capital_used = 0.0
self.starting_cash = 0.0
self.portfolio_value = 0.0
self.pnl = 0.0
self.returns = 0.0
self.cash = 0.0
self.positions = Positions()
self.start_date = None
self.positions_value = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
# Have to convert to primitive dict
state_dict['positions'] = dict(self.positions)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Portfolio saved state is too old.")
self.positions = Positions()
self.positions.update(state.pop('positions'))
self.__dict__.update(state)
class Account(object):
'''
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
'''
def __init__(self):
self.settled_cash = 0.0
self.accrued_interest = 0.0
self.buying_power = float('inf')
self.equity_with_loan = 0.0
self.total_positions_value = 0.0
self.regt_equity = 0.0
self.regt_margin = float('inf')
self.initial_margin_requirement = 0.0
self.maintenance_margin_requirement = 0.0
self.available_funds = 0.0
self.excess_liquidity = 0.0
self.cushion = 0.0
self.day_trades_remaining = float('inf')
self.leverage = 0.0
self.net_leverage = 0.0
self.net_liquidation = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Account({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Account saved state is too old.")
self.__dict__.update(state)
class Position(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Position({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Protocol Position saved state is too old.")
self.__dict__.update(state)
class Positions(dict):
def __missing__(self, key):
pos = Position(key)
self[key] = pos
return pos
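# Hedged usage sketch (editorial): looking up an unknown sid auto-creates and
# stores an empty Position for it, so calling code never needs to check for
# missing keys. The sid value 24 is an illustrative placeholder.
#
#     >>> positions = Positions()
#     >>> positions[24].amount
#     0
#     >>> 24 in positions
#     True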
class SIDData(object):
# Cache some data on the class so that this is shared for all instances of
    # SIDData.
# The dt where we cached the history.
_history_cache_dt = None
    # _history_cache is a dict mapping fields to pd.DataFrames. This is the
# most data we have for a given field for the _history_cache_dt.
_history_cache = {}
# This is the cache that is used for returns. This will have a different
# structure than the other history cache as this is always daily.
_returns_cache_dt = None
_returns_cache = None
# The last dt that we needed to cache the number of minutes.
_minute_bar_cache_dt = None
# If we are in minute mode, there is some cost associated with computing
    # the number of minutes that we need to pass as the bar count to history.
# This will remain constant for a given bar and day count.
# This maps days to number of minutes.
_minute_bar_cache = {}
def __init__(self, sid, initial_values=None):
self._sid = sid
self._freqstr = None
# To check if we have data, we use the __len__ which depends on the
        # __dict__. Because we are forward defining the attributes needed, we
        # need to account for their entries in the __dict__.
# We will add 1 because we need to account for the _initial_len entry
# itself.
self._initial_len = len(self.__dict__) + 1
if initial_values:
self.__dict__.update(initial_values)
@property
def datetime(self):
"""
Provides an alias from data['foo'].datetime -> data['foo'].dt
        `datetime` was previously provided by adding a separate `datetime`
member of the SIDData object via a generator that wrapped the incoming
data feed and added the field to each equity event.
This alias is intended to be temporary, to provide backwards
compatibility with existing algorithms, but should be considered
deprecated, and may be removed in the future.
"""
return self.dt
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __getitem__(self, name):
return self.__dict__[name]
def __setitem__(self, name, value):
self.__dict__[name] = value
def __len__(self):
return len(self.__dict__) - self._initial_len
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "SIDData({0})".format(self.__dict__)
def _get_buffer(self, bars, field='price', raw=False):
"""
Gets the result of history for the given number of bars and field.
This will cache the results internally.
"""
cls = self.__class__
algo = get_algo_instance()
now = algo.datetime
if now != cls._history_cache_dt:
# For a given dt, the history call for this field will not change.
# We have a new dt, so we should reset the cache.
cls._history_cache_dt = now
cls._history_cache = {}
if field not in self._history_cache \
or bars > len(cls._history_cache[field][0].index):
# If we have never cached this field OR the amount of bars that we
# need for this field is greater than the amount we have cached,
# then we need to get more history.
hst = algo.history(
bars, self._freqstr, field, ffill=True,
)
# Assert that the column holds ints, not security objects.
if not isinstance(self._sid, str):
hst.columns = hst.columns.astype(int)
self._history_cache[field] = (hst, hst.values, hst.columns)
        # Slice off only the bars needed. This is because we store the LARGEST
# amount of history for the field, and we might request less than the
# largest from the cache.
buffer_, values, columns = cls._history_cache[field]
if raw:
sid_index = columns.get_loc(self._sid)
return values[-bars:, sid_index]
else:
return buffer_[self._sid][-bars:]
def _get_bars(self, days):
"""
Gets the number of bars needed for the current number of days.
        Figures this out based on the algo data frequency and caches the result.
This caches the result by replacing this function on the object.
This means that after the first call to _get_bars, this method will
point to a new function object.
"""
def daily_get_max_bars(days):
return days
def minute_get_max_bars(days):
            # max number of minutes, regardless of current days or short
# sessions
return days * 390
def daily_get_bars(days):
return days
@with_environment()
def minute_get_bars(days, env=None):
cls = self.__class__
now = get_algo_instance().datetime
if now != cls._minute_bar_cache_dt:
cls._minute_bar_cache_dt = now
cls._minute_bar_cache = {}
if days not in cls._minute_bar_cache:
# Cache this calculation to happen once per bar, even if we
# use another transform with the same number of days.
prev = env.previous_trading_day(now)
ds = env.days_in_range(
env.add_trading_days(-days + 2, prev),
prev,
)
# compute the number of minutes in the (days - 1) days before
# today.
                # 210 minutes in an early close and 390 in a full day.
ms = sum(210 if d in env.early_closes else 390 for d in ds)
# Add the number of minutes for today.
ms += int(
(now - env.get_open_and_close(now)[0]).total_seconds() / 60
)
cls._minute_bar_cache[days] = ms + 1 # Account for this minute
return cls._minute_bar_cache[days]
if get_algo_instance().sim_params.data_frequency == 'daily':
self._freqstr = '1d'
# update this method to point to the daily variant.
self._get_bars = daily_get_bars
self._get_max_bars = daily_get_max_bars
else:
self._freqstr = '1m'
# update this method to point to the minute variant.
self._get_bars = minute_get_bars
self._get_max_bars = minute_get_max_bars
# Not actually recursive because we have already cached the new method.
return self._get_bars(days)
def mavg(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
return nanmean(prices)
def stddev(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
return nanstd(prices, ddof=1)
def vwap(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
vols = self._get_buffer(max_bars, field='volume', raw=True)[-bars:]
vol_sum = nansum(vols)
try:
ret = nansum(prices * vols) / vol_sum
except ZeroDivisionError:
ret = np.nan
return ret
def returns(self):
algo = get_algo_instance()
now = algo.datetime
if now != self._returns_cache_dt:
self._returns_cache_dt = now
self._returns_cache = algo.history(2, '1d', 'price', ffill=True)
hst = self._returns_cache[self._sid]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
class BarData(object):
"""
Holds the event data for all sids for a given dt.
This is what is passed as `data` to the `handle_data` function.
    Note: Many methods are analogues of dict methods because this class
    historically replaced a dict subclass.
"""
def __init__(self, data=None):
self._data = data or {}
self._contains_override = None
def __contains__(self, name):
if self._contains_override:
if self._contains_override(name):
return name in self._data
else:
return False
else:
return name in self._data
def has_key(self, name):
"""
DEPRECATED: __contains__ is preferred, but this method is for
compatibility with existing algorithms.
"""
return name in self
def __setitem__(self, name, value):
self._data[name] = value
def __getitem__(self, name):
return self._data[name]
def __delitem__(self, name):
del self._data[name]
def __iter__(self):
for sid, data in iteritems(self._data):
# Allow contains override to filter out sids.
if sid in self:
if len(data):
yield sid
def iterkeys(self):
# Allow contains override to filter out sids.
return (sid for sid in iterkeys(self._data) if sid in self)
def keys(self):
# Allow contains override to filter out sids.
return list(self.iterkeys())
def itervalues(self):
return (value for _sid, value in self.iteritems())
def values(self):
return list(self.itervalues())
def iteritems(self):
return ((sid, value) for sid, value
in iteritems(self._data)
if sid in self)
def items(self):
return list(self.iteritems())
def __len__(self):
return len(self.keys())
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self._data)
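# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial, not part of the original module): a minimal
# demonstration of the dict-like Event and BarData containers defined above.
# The sid 24 and the price are illustrative placeholders; if executed, run it
# as a module (e.g. ``python -m zipline.protocol``) so the relative imports
# resolve.
if __name__ == "__main__":
    demo_event = Event({'sid': 24, 'price': 10.0})
    demo_bar_data = BarData({24: demo_event})
    assert 24 in demo_bar_data
    assert demo_bar_data[24]['price'] == 10.0
    assert list(demo_bar_data.keys()) == [24]
    print(demo_bar_data)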
|
apache-2.0
|
hugobowne/scikit-learn
|
sklearn/cluster/tests/test_dbscan.py
|
176
|
12155
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
|
bsd-3-clause
|
bnaul/scikit-learn
|
sklearn/linear_model/_bayes.py
|
3
|
25564
|
"""
Various bayesian regression
"""
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from ._base import LinearModel, _rescale_data
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet
from scipy.linalg import pinvh
from ..utils.validation import _check_sample_weight
from ..utils.validation import _deprecate_positional_args
###############################################################################
# BayesianRidge regression
class BayesianRidge(RegressorMixin, LinearModel):
"""Bayesian ridge regression.
Fit a Bayesian ridge model. See the Notes section for details on this
implementation and the optimization of the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, default=300
Maximum number of iterations. Should be greater than or equal to 1.
tol : float, default=1e-3
Stop the algorithm if w has converged.
alpha_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter.
alpha_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
lambda_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter.
lambda_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
alpha_init : float, default=None
Initial value for alpha (precision of the noise).
If not set, alpha_init is 1/Var(y).
.. versionadded:: 0.22
lambda_init : float, default=None
Initial value for lambda (precision of the weights).
If not set, lambda_init is 1.
.. versionadded:: 0.22
compute_score : bool, default=False
If True, compute the log marginal likelihood at each iteration of the
optimization.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model.
The intercept is not treated as a probabilistic parameter
and thus has no associated variance. If set
to False, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
verbose : bool, default=False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array-like of shape (n_features,)
Coefficients of the regression model (mean of distribution)
intercept_ : float
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated precision of the noise.
lambda_ : float
Estimated precision of the weights.
sigma_ : array-like of shape (n_features, n_features)
Estimated variance-covariance matrix of the weights
scores_ : array-like of shape (n_iter_+1,)
        If compute_score is True, value of the log marginal likelihood (to be
maximized) at each iteration of the optimization. The array starts
with the value of the log marginal likelihood obtained for the initial
values of alpha and lambda and ends with the value obtained for the
estimated alpha and lambda.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
BayesianRidge()
>>> clf.predict([[1, 1]])
array([1.])
Notes
-----
There exist several strategies to perform Bayesian ridge regression. This
implementation is based on the algorithm described in Appendix A of
(Tipping, 2001) where updates of the regularization parameters are done as
suggested in (MacKay, 1992). Note that according to A New
View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these
update rules do not guarantee that the marginal likelihood is increasing
between two consecutive iterations of the optimization.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine,
Journal of Machine Learning Research, Vol. 1, 2001.
"""
@_deprecate_positional_args
def __init__(self, *, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, alpha_init=None,
lambda_init=None, compute_score=False, fit_intercept=True,
normalize=False, copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.alpha_init = alpha_init
self.lambda_init = lambda_init
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Fit the model
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary
sample_weight : ndarray of shape (n_samples,), default=None
Individual weights for each sample
.. versionadded:: 0.20
parameter *sample_weight* support to BayesianRidge.
Returns
-------
self : returns an instance of self.
"""
if self.n_iter < 1:
raise ValueError('n_iter should be greater than or equal to 1.'
' Got {!r}.'.format(self.n_iter))
X, y = self._validate_data(X, y, dtype=np.float64, y_numeric=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
n_samples, n_features = X.shape
# Initialization of the values of the parameters
eps = np.finfo(np.float64).eps
# Add `eps` in the denominator to omit division by zero if `np.var(y)`
# is zero
alpha_ = self.alpha_init
lambda_ = self.lambda_init
if alpha_ is None:
alpha_ = 1. / (np.var(y) + eps)
if lambda_ is None:
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# update posterior mean coef_ based on alpha_ and lambda_ and
# compute corresponding rmse
coef_, rmse_ = self._update_coef_(X, y, n_samples, n_features,
XT_y, U, Vh, eigen_vals_,
alpha_, lambda_)
if self.compute_score:
# compute the log marginal likelihood
s = self._log_marginal_likelihood(n_samples, n_features,
eigen_vals_,
alpha_, lambda_,
coef_, rmse_)
self.scores_.append(s)
# Update alpha and lambda according to (MacKay, 1992)
gamma_ = np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.n_iter_ = iter_ + 1
# return regularization parameters and corresponding posterior mean,
# log marginal likelihood and posterior covariance
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_, rmse_ = self._update_coef_(X, y, n_samples, n_features,
XT_y, U, Vh, eigen_vals_,
alpha_, lambda_)
if self.compute_score:
# compute the log marginal likelihood
s = self._log_marginal_likelihood(n_samples, n_features,
eigen_vals_,
alpha_, lambda_,
coef_, rmse_)
self.scores_.append(s)
self.scores_ = np.array(self.scores_)
# posterior covariance is given by 1/alpha_ * scaled_sigma_
scaled_sigma_ = np.dot(Vh.T,
Vh / (eigen_vals_ +
lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = (1. / alpha_) * scaled_sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array-like of shape (n_samples,)
Mean of predictive distribution of query points.
y_std : array-like of shape (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
def _update_coef_(self, X, y, n_samples, n_features, XT_y, U, Vh,
eigen_vals_, alpha_, lambda_):
"""Update posterior mean and compute corresponding rmse.
Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where
scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features)
+ np.dot(X.T, X))^-1
"""
if n_samples > n_features:
coef_ = np.linalg.multi_dot([Vh.T,
Vh / (eigen_vals_ + lambda_ /
alpha_)[:, np.newaxis],
XT_y])
else:
coef_ = np.linalg.multi_dot([X.T,
U / (eigen_vals_ + lambda_ /
alpha_)[None, :],
U.T, y])
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
return coef_, rmse_
def _log_marginal_likelihood(self, n_samples, n_features, eigen_vals,
alpha_, lambda_, coef, rmse):
"""Log marginal likelihood."""
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
# compute the log of the determinant of the posterior covariance.
# posterior covariance is given by
# sigma = (lambda_ * np.eye(n_features) + alpha_ * np.dot(X.T, X))^-1
if n_samples > n_features:
logdet_sigma = - np.sum(np.log(lambda_ + alpha_ * eigen_vals))
else:
logdet_sigma = np.full(n_features, lambda_,
dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals
logdet_sigma = - np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse -
lambda_ * np.sum(coef ** 2) +
logdet_sigma -
n_samples * log(2 * np.pi))
return score
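# Hedged editorial note (not part of the original module): the per-iteration
# hyper-parameter updates implemented in ``fit`` above follow MacKay (1992).
# With s_i the eigenvalues of X^T X (``eigen_vals_``):
#     gamma  = sum_i alpha * s_i / (lambda + alpha * s_i)
#     lambda = (gamma + 2 * lambda_1) / (||coef||^2 + 2 * lambda_2)
#     alpha  = (n_samples - gamma + 2 * alpha_1) / (rmse + 2 * alpha_2)
# where the alpha_1/alpha_2 and lambda_1/lambda_2 terms come from the Gamma
# priors placed on the two precisions.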
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(RegressorMixin, LinearModel):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, default=300
Maximum number of iterations.
tol : float, default=1e-3
Stop the algorithm if w has converged.
alpha_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter.
alpha_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
lambda_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter.
lambda_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
compute_score : bool, default=False
If True, compute the objective function at each step of the model.
    threshold_lambda : float, default=10000
threshold for removing (pruning) weights with high precision from
the computation.
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
verbose : bool, default=False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array-like of shape (n_features,)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array-like of shape (n_features,)
estimated precisions of the weights.
sigma_ : array-like of shape (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
intercept_ : float
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
ARDRegression()
>>> clf.predict([[1, 1]])
array([1.])
Notes
-----
For an example, see :ref:`examples/linear_model/plot_ard.py
<sphx_glr_auto_examples_linear_model_plot_ard.py>`.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our ``self.alpha_``
Their alpha is our ``self.lambda_``
ARD is a little different than the slide: only dimensions/features for
which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are
discarded.
"""
@_deprecate_positional_args
def __init__(self, *, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like of shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values (integers). Will be cast to X's dtype if necessary
Returns
-------
self : returns an instance of self.
"""
X, y = self._validate_data(X, y, dtype=np.float64, y_numeric=True,
ensure_min_samples=2)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
eps = np.finfo(np.float64).eps
# Add `eps` in the denominator to omit division by zero if `np.var(y)`
# is zero
alpha_ = 1. / (np.var(y) + eps)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):
coef_[keep_lambda] = alpha_ * np.linalg.multi_dot([
sigma_, X[:, keep_lambda].T, y])
return coef_
update_sigma = (self._update_sigma if n_samples >= n_features
else self._update_sigma_woodbury)
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
if not keep_lambda.any():
break
if keep_lambda.any():
# update sigma and mu using updated params from the last iteration
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
else:
sigma_ = np.array([]).reshape(0, 0)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def _update_sigma_woodbury(self, X, alpha_, lambda_, keep_lambda):
# See slides as referenced in the docstring note
# this function is used when n_samples < n_features and will invert
# a matrix of shape (n_samples, n_samples) making use of the
# woodbury formula:
# https://en.wikipedia.org/wiki/Woodbury_matrix_identity
n_samples = X.shape[0]
X_keep = X[:, keep_lambda]
inv_lambda = 1 / lambda_[keep_lambda].reshape(1, -1)
sigma_ = pinvh(
np.eye(n_samples) / alpha_ + np.dot(X_keep * inv_lambda, X_keep.T)
)
sigma_ = np.dot(sigma_, X_keep * inv_lambda)
sigma_ = - np.dot(inv_lambda.reshape(-1, 1) * X_keep.T, sigma_)
sigma_[np.diag_indices(sigma_.shape[1])] += 1. / lambda_[keep_lambda]
return sigma_
def _update_sigma(self, X, alpha_, lambda_, keep_lambda):
# See slides as referenced in the docstring note
# this function is used when n_samples >= n_features and will
# invert a matrix of shape (n_features, n_features)
X_keep = X[:, keep_lambda]
gram = np.dot(X_keep.T, X_keep)
eye = np.eye(gram.shape[0])
sigma_inv = lambda_[keep_lambda] * eye + alpha_ * gram
sigma_ = pinvh(sigma_inv)
return sigma_
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array-like of shape (n_samples,)
Mean of predictive distribution of query points.
y_std : array-like of shape (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
X = X[:, self.lambda_ < self.threshold_lambda]
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
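# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial, not part of the original module): fit both
# estimators defined above on a tiny synthetic problem and query the
# predictive standard deviation. The data are illustrative only; if executed,
# run as a module (e.g. ``python -m sklearn.linear_model._bayes``) so the
# relative imports resolve.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 3)
    y_demo = X_demo @ np.array([1.0, 0.5, 0.0]) + 0.1 * rng.randn(50)
    for est in (BayesianRidge(compute_score=True), ARDRegression()):
        est.fit(X_demo, y_demo)
        y_mean, y_std = est.predict(X_demo[:2], return_std=True)
        print(type(est).__name__, y_mean.round(2), y_std.round(3))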
|
bsd-3-clause
|
khalibartan/pgmpy
|
pgmpy/estimators/ScoreCache.py
|
1
|
4150
|
#!/usr/bin/env python
from pgmpy.estimators import StructureScore
class ScoreCache(StructureScore):
def __init__(self, base_scorer, data, max_size=10000, **kwargs):
"""
A wrapper class for StructureScore instances, which implement a decomposable score,
that caches local scores.
Based on the global decomposition property of Bayesian networks for decomposable scores.
Parameters
----------
base_scorer: StructureScore instance
Has to be a decomposable score.
data: pandas DataFrame instance
DataFrame instance where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
max_size: int (optional, default 10_000)
The maximum number of elements allowed in the cache. When the limit is reached, the least recently used
entries will be discarded.
**kwargs
Additional arguments that will be handed to the super constructor.
Reference
---------
Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 18.3
"""
assert isinstance(base_scorer, StructureScore), "Base scorer has to be of type StructureScore."
self.base_scorer = base_scorer
self.cache = LRUCache(original_function=self._wrapped_original, max_size=int(max_size))
super(ScoreCache, self).__init__(data, **kwargs)
def local_score(self, variable, parents):
hashable = tuple(parents)
return self.cache(variable, hashable)
def _wrapped_original(self, variable, parents):
expected = list(parents)
return self.base_scorer.local_score(variable, expected)
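# Hedged usage sketch (editorial, assuming pgmpy's BDeuScore as the
# decomposable base scorer and a DataFrame ``df`` of observations; both names
# are illustrative):
#
#     cached = ScoreCache(BDeuScore(df), df)
#     cached.local_score('A', ['B', 'C'])   # computed once
#     cached.local_score('A', ['B', 'C'])   # served from the LRU cache below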
# link fields
_PREV, _NEXT, _KEY, _VALUE = 0, 1, 2, 3
class LRUCache:
def __init__(self, original_function, max_size=10000):
"""
Least-Recently-Used cache.
        Acts as a wrapper around an arbitrary function and caches the return values.
Based on the implementation of Raymond Hettinger
(https://stackoverflow.com/questions/2437617/limiting-the-size-of-a-python-dictionary)
Parameters
----------
original_function: callable
The original function that will be wrapped. Return values will be cached.
The function parameters have to be hashable.
max_size: int (optional, default 10_000)
The maximum number of elements allowed within the cache. If the size would be exceeded,
the least recently used element will be removed from the cache.
"""
self.original_function = original_function
self.max_size = max_size
self.mapping = {}
# oldest
self.head = [None, None, None, None]
# newest
self.tail = [self.head, None, None, None]
self.head[_NEXT] = self.tail
def __call__(self, *key):
mapping, head, tail = self.mapping, self.head, self.tail
link = mapping.get(key, head)
if link is head:
# Not yet in map
value = self.original_function(*key)
if len(mapping) >= self.max_size:
# Unlink the least recently used element
old_prev, old_next, old_key, old_value = head[_NEXT]
head[_NEXT] = old_next
old_next[_PREV] = head
del mapping[old_key]
# Add new value as most recently used element
last = tail[_PREV]
link = [last, tail, key, value]
mapping[key] = last[_NEXT] = tail[_PREV] = link
else:
# Unlink element from current position
link_prev, link_next, key, value = link
link_prev[_NEXT] = link_next
link_next[_PREV] = link_prev
# Add as most recently used element
last = tail[_PREV]
last[_NEXT] = tail[_PREV] = link
link[_PREV] = last
link[_NEXT] = tail
return value
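# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial, not part of the original module): exercise
# the LRUCache above with a hypothetical stand-in for an expensive local-score
# computation.
if __name__ == "__main__":
    def _expensive_square(x):
        # stand-in for an expensive, deterministic computation
        return x * x

    demo_cache = LRUCache(original_function=_expensive_square, max_size=2)
    assert demo_cache(2) == 4                 # computed and stored
    assert demo_cache(2) == 4                 # served from the cache
    demo_cache(3)
    demo_cache(4)                             # exceeds max_size: (2,) is evicted
    assert (2,) not in demo_cache.mapping
    assert (3,) in demo_cache.mapping and (4,) in demo_cache.mapping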
|
mit
|
cwu2011/scikit-learn
|
examples/model_selection/randomized_search.py
|
57
|
3208
|
"""
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
iris = load_digits()
X, y = iris.data, iris.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
|
bsd-3-clause
|
rishikksh20/scikit-learn
|
examples/cluster/plot_kmeans_silhouette_analysis.py
|
83
|
5888
|
"""
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes, as can also be
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
|
bsd-3-clause
|
petosegan/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
84
|
14181
|
import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    # Test that the scorers work with multilabel-indicator format
    # for multilabel and multi-output multi-class classifiers
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
return_indicator=True,
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
|
bsd-3-clause
|
paladin74/neural-network-animation
|
matplotlib/tests/test_spines.py
|
10
|
1420
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import matplotlib
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
@image_comparison(baseline_images=['spines_axes_positions'])
def test_spines_axes_positions():
# SF bug 2852168
fig = plt.figure()
x = np.linspace(0,2*np.pi,100)
y = 2*np.sin(x)
ax = fig.add_subplot(1,1,1)
ax.set_title('centered spines')
ax.plot(x,y)
ax.spines['right'].set_position(('axes',0.1))
ax.yaxis.set_ticks_position('right')
ax.spines['top'].set_position(('axes',0.25))
ax.xaxis.set_ticks_position('top')
ax.spines['left'].set_color('none')
ax.spines['bottom'].set_color('none')
@image_comparison(baseline_images=['spines_data_positions'])
def test_spines_data_positions():
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.spines['left'].set_position(('data', -1.5))
ax.spines['top'].set_position(('data', 0.5))
ax.spines['right'].set_position(('data', -0.5))
ax.spines['bottom'].set_position('zero')
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
@image_comparison(baseline_images=['spines_capstyle'])
def test_spines_capstyle():
# issue 2542
plt.rc('axes', linewidth=20)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_xticks([])
ax.set_yticks([])
|
mit
|
JetBrains/intellij-community
|
python/helpers/pycharm_matplotlib_backend/backend_interagg.py
|
10
|
3831
|
import base64
import matplotlib
import os
import sys
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import FigureManagerBase, ShowBase
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from datalore.display import debug, display, SHOW_DEBUG_INFO
PY3 = sys.version_info[0] >= 3
index = int(os.getenv("PYCHARM_MATPLOTLIB_INDEX", 0))
rcParams = matplotlib.rcParams
class Show(ShowBase):
def __call__(self, **kwargs):
debug("show() called with args %s" % kwargs)
managers = Gcf.get_all_fig_managers()
if not managers:
debug("Error: Managers list in `Gcf.get_all_fig_managers()` is empty")
return
for manager in managers:
manager.show(**kwargs)
def mainloop(self):
pass
show = Show()
# from pyplot API
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.show()
else:
debug("Error: Figure manager `Gcf.get_active()` is None")
# from pyplot API
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, figure)
# from pyplot API
def new_figure_manager_given_figure(num, figure):
canvas = FigureCanvasInterAgg(figure)
manager = FigureManagerInterAgg(canvas, num)
return manager
# from pyplot API
class FigureCanvasInterAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self, figure)
def show(self):
FigureCanvasAgg.draw(self)
if matplotlib.__version__ < '1.2':
buffer = self.tostring_rgb(0, 0)
else:
buffer = self.tostring_rgb()
if len(set(buffer)) <= 1:
# do not plot empty
debug("Error: Buffer FigureCanvasAgg.tostring_rgb() is empty")
return
render = self.get_renderer()
width = int(render.width)
debug("Image width: %d" % width)
is_interactive = os.getenv("PYCHARM_MATPLOTLIB_INTERACTIVE", False)
if is_interactive:
debug("Using interactive mode (Run with Python Console)")
debug("Plot index = %d" % index)
else:
debug("Using non-interactive mode (Run without Python Console)")
plot_index = index if is_interactive else -1
display(DisplayDataObject(plot_index, width, buffer))
def draw(self):
FigureCanvasAgg.draw(self)
is_interactive = os.getenv("PYCHARM_MATPLOTLIB_INTERACTIVE", False)
if is_interactive and matplotlib.is_interactive():
self.show()
else:
debug("Error: calling draw() in non-interactive mode won't show a plot. Try to 'Run with Python Console'")
class FigureManagerInterAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
global index
index += 1
self.canvas = canvas
self._num = num
self._shown = False
def show(self, **kwargs):
self.canvas.show()
Gcf.destroy(self._num)
class DisplayDataObject:
def __init__(self, plot_index, width, image_bytes):
self.plot_index = plot_index
self.image_width = width
self.image_bytes = image_bytes
def _repr_display_(self):
image_bytes_base64 = base64.b64encode(self.image_bytes)
if PY3:
image_bytes_base64 = image_bytes_base64.decode()
body = {
'plot_index': self.plot_index,
'image_width': self.image_width,
'image_base64': image_bytes_base64
}
return ('pycharm-plot-image', body)
|
apache-2.0
|
vibhorag/scikit-learn
|
examples/cluster/plot_digits_agglomeration.py
|
377
|
1694
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
|
bsd-3-clause
|
mpoquet/execo
|
src/execo_g5k/topology.py
|
1
|
24980
|
# Copyright 2009-2016 INRIA Rhone-Alpes, Service Experimentation et
# Developpement
#
# This file is part of Execo.
#
# Execo is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Execo is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Execo. If not, see <http://www.gnu.org/licenses/>
""" A module based on `networkx <http://networkx.github.io/>`_ to create a
topological graph of the Grid'5000 platform. "Nodes" are used to represent
elements (compute nodes, switches, routers, renater) and "Edges" are the network
links. Nodes have a 'kind' attribute (plus power and cores for compute nodes),
whereas edges have bandwidth and latency information.
All information comes from the Grid'5000 reference API.
"""
from time import time
from execo import logger, Host
from execo.log import style
from .oar import format_date
from .api_utils import get_g5k_sites, get_host_site, \
get_host_cluster, get_g5k_clusters, get_cluster_hosts, \
get_site_clusters, get_api_data, get_g5k_hosts, \
get_network_equipment_attributes, get_host_shortname
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
arbitrary_latency = 2.25E-3
suffix = '.grid5000.fr'
class g5k_graph(nx.MultiGraph):
"""Main graph representing the topology of the Grid'5000 platform. All
nodes elements are defined with their FQDN"""
def __init__(self, elements=None):
"""Create the :func:`~nx.MultiGraph` representing Grid'5000 network
topology
:param sites: add the topology of the given site(s)"""
logger.debug('Initializing g5k_graph')
super(g5k_graph, self).__init__()
self.data = get_api_data()
self.graph['api_commit'] = self.data['network']['backbone'][0]['version']
self.graph['date'] = format_date(time())
if elements:
if isinstance(elements, str):
elements = [elements]
for e in elements:
if isinstance(e, Host):
e = get_host_shortname(e.address)
e = e.split('.')[0]
if e in get_g5k_sites():
self.add_site(e, self.data['sites'][e])
if e in get_g5k_clusters():
self.add_cluster(e, self.data['clusters'][e])
if e in get_g5k_hosts():
self.add_host(e, self.data['hosts'][e])
if len(self.get_sites()) > 1:
self.add_backbone()
# add/update/rm elements, public methods
def add_host(self, host, data=None):
"""Add a host in the graph
:param host: a string corresponding to the node name
:param data: a dict containing the Grid'5000 host attributes"""
if isinstance(host, Host):
_host = get_host_shortname(host.address)
else:
_host = host
if data:
power = data['performance']['core_flops']
cores = data['architecture']['nb_cores']
else:
power = 0
cores = 0
if len(self.get_host_adapters(_host)) > 0:
logger.debug('Adding %s', style.host(_host))
self.add_node(_host, {'kind': 'node',
'power': power,
'cores': cores})
for eq in self.get_host_adapters(_host):
if eq['mounted']:
self.add_equip(eq['switch'], get_host_site(_host))
else:
logger.warning('Node %s has no valid network connection',
_host)
def rm_host(self, host):
"""Remove the host from the graph"""
logger.debug('Removing host %s', style.host(host))
self.remove_node(host)
for eq in self.get_host_adapters(host):
if not self._equip_has_nodes(eq['switch']):
logger.debug('Removing equip %s', eq['switch'])
self.rm_equip(eq['switch'])
def add_cluster(self, cluster, data=None):
"""Add the cluster to the graph"""
for h in get_cluster_hosts(cluster):
self.add_host(h, self.data['hosts'][h])
def rm_cluster(self, cluster):
"""Remove the cluster from the graph"""
for h in get_cluster_hosts(cluster):
if self.has_node(h):
self.rm_host(h)
def add_site(self, site, data=None):
"""Add a site to the graph"""
for c in get_site_clusters(site):
for h in get_cluster_hosts(c):
self.add_host(h, self.data['hosts'][h])
def rm_site(self, site):
"""Remove the site from the graph"""
for c in get_site_clusters(site):
self.rm_cluster(c)
if self.get_site_router(site) in self.nodes():
self.rm_equip(self.get_site_router(site))
def add_equip(self, equip, site):
"""Add a network equipment """
if equip not in self.data['network'][site]:
logger.warning('Equipment %s not described in API' % (equip,))
return
data = self.data['network'][site][equip]
logger.debug('Adding equipment %s', equip)
self.add_node(equip, kind=data['kind'],
backplane=data['backplane_bps'])
lc_data = data['linecards']
if data['kind'] == 'router':
router_bw = data['backplane_bps']
for i_lc, lc in enumerate([n for n in lc_data if 'ports' in n]):
lc_node = equip + '_lc' + str(i_lc)
lc_has_element = False
for port in sorted([p for p in lc['ports'] if 'uid' in p]):
kind = port['kind'] if 'kind' in port else lc['kind']
bandwidth = lc['rate'] if 'rate' not in port else port['rate']
if self.has_node(port['uid']):
if kind == 'node':
for e in self.get_host_adapters(port['uid']):
if e['switch'] == equip:
if ('port' in port and port['port'] == e['device']) or ('port' not in port and e['device'] == 'eth0'):
lc_has_element = True
key1 = lc_node + '_' + port['uid'] + '_' + e['device']
logger.debug('Adding link between %s and %s %s' % (
lc_node, port['uid'], e['device']))
self.add_edge(lc_node, port['uid'], key1,
bandwidth=bandwidth,
active=e['mounted'])
key2 = equip + '_' + lc_node
logger.debug('Adding link between %s and %s',
equip, lc_node)
self.add_edge(equip, lc_node, key2,
bandwidth=router_bw, active=True)
if kind == 'switch':
lc_has_element = True
key1 = lc_node + '_' + port['uid']
self.add_edge(lc_node, port['uid'], key1,
bandwidth=bandwidth, active=True)
key2 = equip + '_' + lc_node
self.add_edge(equip, lc_node, key2,
bandwidth=router_bw, active=True)
if 'renater' in port['uid']:
lc_has_element = True
self.add_node(port['uid'], kind='renater')
key1 = lc_node + ' ' + port['uid']
self.add_edge(lc_node, port['uid'], key1,
bandwidth=bandwidth, active=True)
key2 = equip + '_' + lc_node
self.add_edge(equip, lc_node, key2,
bandwidth=router_bw, active=True)
if lc_has_element:
logger.debug('Adding linecard %s', lc_node)
backplane = lc['backplane_bps'] if 'backplane_bps' \
in lc else data['backplane_bps']
self.add_node(lc_node, kind='linecard',
backplane=backplane)
else:
            # some switches have two linecards?? pat, sgraphene1 => REPORT BUG
for lc in [n for n in lc_data if 'ports' in n]:
for port in sorted([p for p in lc['ports'] if 'uid' in p]):
kind = port['kind'] if 'kind' in port else lc['kind']
bandwidth = lc['rate'] if 'rate' not in port else port['rate']
if self.has_node(port['uid']):
if kind == 'node':
for e in self.get_host_adapters(port['uid']):
if e['switch'] == equip:
key = equip + '_' + port['uid'] + '_' + e['device']
self.add_edge(equip, port['uid'], key,
bandwidth=bandwidth,
active=e['mounted'])
if kind == 'switch':
key = equip + '_' + port['uid']
self.add_edge(equip, port['uid'], key,
bandwidth=bandwidth, active=True)
if kind == 'router':
self.add_equip(port['uid'], site)
def rm_equip(self, equip):
"""Remove an equipment from the node"""
logger.debug('Removing equip %s', style.host(equip))
self.remove_node(equip)
if get_network_equipment_attributes(equip)['kind'] == 'router':
lc_nodes = [x for x in self.nodes() if equip in x]
logger.debug('Removing router linecard %s', ' '.join(lc_nodes))
self.remove_nodes_from(lc_nodes)
def add_backbone(self):
"""Add the nodes corresponding to Renater equipments"""
logger.debug('Add %s network', style.emph('Renater'))
backbone = self.data['network']['backbone']
for equip in backbone:
src = equip['uid']
self.add_node(src, kind='renater')
for lc in equip['linecards']:
for port in lc['ports']:
if 'uid' in port and 'renater-' in port['uid']:
bandwidth = lc['rate'] if 'rate' not in port else port['rate']
latency = port['latency'] if 'latency' in port \
else arbitrary_latency
kind = 'renater' if 'kind' not in port else port['kind']
dst = port['uid']
logger.debug('* %s (%s, bw=%s, lat=%s)', dst, kind,
bandwidth, latency)
self.add_node(dst, kind=kind)
if not self.has_edge(src, dst):
self.add_edge(src, dst, bandwidth=bandwidth,
latency=latency, active=True)
        # Remove unused Renater equipment when only part of the platform is present
        if set(self.get_sites()) != set(get_g5k_sites()):
logger.debug('Removing unused Renater equipments')
used_elements = []
for site in self.get_sites():
dests = self.get_sites()[:]
dests.remove(site)
for dest in dests:
gw_src = self.get_site_router(site)[0]
gw_dst = self.get_site_router(dest)[0]
for element in [el for el in nx.shortest_path(self, gw_src, gw_dst) if 'renater' in el]:
if element not in used_elements:
used_elements.append(element)
for element, _ in [n for n in self.nodes_iter(data=True) if n[1]['kind'] == 'renater']:
if element not in used_elements:
self.remove_node(element)
def rm_backbone(self):
"""Remove all elements from the backbone"""
self.remove_nodes_from(self.get_backbone())
# get elements, public methods
def get_hosts(self, cluster=None, site=None):
"""Return the list of nodes corresponding to hosts"""
if cluster:
return [x for x in self.nodes(True) if cluster in x[0] and
x[1]['kind'] == 'node']
elif site:
return [x for x in self.nodes(True) if site == get_host_site(x[0]) and
x[1]['kind'] == 'node']
else:
return [x for x in self.nodes(True) if x[1]['kind'] == 'node']
def get_clusters(self, site=None):
"""Return the list of clusters"""
if site:
            return list(set([get_host_cluster(x[0]) for x in self.nodes(True) if site == get_host_site(x[0]) and x[1]['kind'] == 'node']))
else:
return list(set([get_host_cluster(x[0]) for x in self.nodes(True) if x[1]['kind'] == 'node']))
def get_host_neighbours(self, host):
"""Return the compute nodes that are connected to the same switch,
router or linecard"""
switch = self.get_host_adapters(host)[0]['switch']
return [x for x in self.get_equip_hosts(switch) if x != host]
def get_equip_hosts(self, equip):
"""Return the nodes which are connected to the equipment"""
hosts = []
if self.node[equip]['kind'] == 'router':
lcs = self.neighbors(equip)
else:
            lcs = [equip]
for lc in lcs:
for n in self.neighbors(lc):
if self.node[n]['kind'] == 'node':
hosts.append(n)
return hosts
def get_site_router(self, site):
"""Return the node corresponding to the router of a site"""
return [n for n in self.nodes(True) if n[1]['kind'] == 'router' and site in n[0]][0]
def get_sites(self):
"""Return the list of sites"""
return list(set([get_host_site(x[0]) for x in self.nodes(True) if x[1]['kind'] == 'node']))
def get_backbone(self):
"""Return """
return [x for x in self.nodes(True) if x[1]['kind'] == 'renater']
def get_host_adapters(self, host):
"""Return the mountable network interfaces from a host"""
try:
if host in self.data['hosts']:
return [m for m in self.data['hosts'][host]['network_adapters']
if 'switch' in m
and not m['management']
and m['mountable']
and m['switch']
and m['interface'] == 'Ethernet']
except:
logger.warning("Wrong description for host %s" % style.host(host))
logger.debug("host's network_adapters = %s" % (self.data['hosts'][host]['network_adapters'],))
return []
def _equip_has_nodes(self, equip):
""" """
data = get_network_equipment_attributes(equip)
if data['kind'] == 'router':
return True
for lc in [n for n in data['linecards'] if 'ports' in n]:
for port in sorted([p for p in lc['ports'] if 'uid' in p]):
kind = port['kind'] if 'kind' in port else lc['kind']
if kind == 'node' and self.has_node(port['uid']):
return True
return False
def treemap(gr, nodes_legend=None, edges_legend=None, nodes_labels=None,
layout='neato', compact=False):
"""Create a treemap of the topology and return a matplotlib figure
:param nodes_legend: a dict of dicts containing the parameter used to draw
the nodes, such as 'myelement': {'color': '#9CF7BC', 'shape': 'p',
'size': 200}
:param edges_legend: a dict of dicts containing the parameter used to draw
the edges, such as bandwidth: {'width': 0.2, 'color': '#666666'}
:param nodes_labels: a dict of dicts containing the font parameters for
the labels, such as 'myelement ': {'nodes': {}, 'font_size': 8,
'font_weight': 'bold', 'str_func': lambda n: n.split('.')[1].title()}
:param layout: the graphviz tool to be used to compute node position
    :param compact: represent only one node per cluster/cabinet
    WARNING: This function uses matplotlib.figure, which by default requires a
    DISPLAY. If you want to use it on a headless host, you need to change the
    matplotlib backend before importing the execo_g5k.topology module.
"""
base_size = 2
_default_color = '#000000'
_default_shape = 'o'
_default_size = 100
_default_width = 0.8
_default_font_size = 10
_default_font_weight = 'normal'
def _default_str_func(n):
return n.split('.')[0]
def _default_nodes_legend():
"""Create a default legend for the nodes"""
return {'renater':
{'color': '#9CF7BC', 'shape': 'p', 'size': 200},
'router':
{'color': '#BFDFF2', 'shape': '8', 'size': 300,
'width': 0.5},
'switch':
{'color': '#F5C9CD', 'shape': 's', 'size': 100,
'width': 0.2},
'node':
{'color': '#F0F7BE', 'shape': 'o', 'size': 30,
'width': 0.2},
'cluster':
{'color': '#F0F7BE', 'shape': 'd', 'size': 200,
'width': _default_width},
'default':
{'color': _default_color, 'shape': _default_shape,
'size': _default_size},
'linecard':
{'size': 10, 'shape': '^', 'color': 'w', 'width': 0.1},
}
def _default_edges_legend():
"""Defines the width and color of the edges based on bandwidth"""
return {100000000: {'width': 0.2, 'color': '#666666'},
1000000000: {'width': 0.4, 'color': '#666666'},
3000000000: {'width': 0.6, 'color': '#333333'},
10000000000: {'width': 1.0, 'color': '#111111'},
20000000000: {'width': 2.0, 'color': '#111111'},
30000000000: {'width': 3.0, 'color': '#111111'},
40000000000: {'width': 4.0, 'color': '#111111'},
'default': {'width': _default_width, 'color': _default_color}}
def _default_nodes_labels(compact=False):
"""Defines the font labels"""
def _default_str_func(n):
return n.split('.')[0]
return {'renater':
{'nodes': {},
'font_size': base_size * 4,
'font_weight': 'normal',
'str_func': lambda n: n.split('-')[1].title()},
'router':
{'nodes': {},
'font_size': base_size * 4,
'font_weight': 'bold'},
'switch':
{'nodes': {},
'font_size': base_size * 4,
'font_weight': 'normal'},
'cluster':
{'nodes': {},
'font_size': base_size * 4,
'font_weight': 'normal'},
'node':
{'nodes': {},
'font_size': base_size * 3,
'font_weight': 'normal'},
'default':
{'nodes': {},
'font_size': _default_font_size,
'font_weight': _default_font_weight,
'str_func': _default_str_func},
'linecard':
{'nodes': {},
'font_size': base_size * 3,
'str_func': lambda n: n.split('_')[1]}
}
# Setting legend and labels
_nodes_legend = _default_nodes_legend()
_edges_legend = _default_edges_legend()
_nodes_labels = _default_nodes_labels(compact)
if nodes_legend:
_nodes_legend.update(nodes_legend)
if edges_legend:
_edges_legend.update(edges_legend)
if nodes_labels:
_nodes_labels.update(nodes_labels)
if not compact:
elements = ['renater', 'router', 'switch', 'node', 'linecard']
else:
for site in gr.get_sites():
for cluster, data in gr.get_clusters().items():
for equip, radicals in data['equips'].items():
gr.add_node(cluster + '\n' + radicals,
{'kind': 'cluster'})
gr.add_edge(cluster + '\n' + radicals, equip,
{'bandwidth': data['bandwidth']})
gr.remove_nodes_from([n[0] for n in gr.nodes(True) if n[1]['kind'] == 'node'])
elements = ['renater', 'router', 'switch', 'cluster']
logger.debug('Legend and labels initialized')
# Initializing plot
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
logger.debug('Defining positions')
    try:
        pos = nx.graphviz_layout(gr, prog=layout)
    except:
        logger.warning('Error in generating graphviz layout, will use ' +
                       'spring layout that does not scale well ...')
        pos = nx.spring_layout(gr, iterations=100)
# Adding the nodes
for k in elements:
nodes = [node[0] for node in gr.nodes_iter(data=True)
if 'kind' in node[1] and node[1]['kind'] == k]
if k not in _nodes_legend:
_nodes_legend[k] = _nodes_legend['default']
nodes = nx.draw_networkx_nodes(gr, pos, nodelist=nodes,
node_shape=_nodes_legend[k]['shape']
if 'shape' in _nodes_legend[k] else
_default_shape,
node_color=_nodes_legend[k]['color']
if 'color' in _nodes_legend[k] else
_default_color,
node_size=_nodes_legend[k]['size']
if 'size' in _nodes_legend[k] else
_default_size,
linewidths=_nodes_legend[k]['width']
if 'width' in _nodes_legend[k] else
_default_width)
# Adding the edges
for bandwidth, params in _edges_legend.items():
        if bandwidth != 'default':
edges = [(edge[0], edge[1]) for edge in gr.edges_iter(data=True)
if 'bandwidth' in edge[2] and edge[2]['bandwidth'] == bandwidth]
nx.draw_networkx_edges(gr, pos, edgelist=edges,
width=params['width'] if 'width' in params
else _default_width,
edge_color=params['color'] if 'color' in params
else _default_color)
edges = [(edge[0], edge[1]) for edge in gr.edges_iter(data=True)
if edge[2]['bandwidth'] not in _edges_legend]
nx.draw_networkx_edges(gr, pos, edgelist=edges,
width=_edges_legend['default']['width'],
edge_color=_edges_legend['default']['color'])
# Adding the labels
for node, data in gr.nodes_iter(data=True):
if 'nodes' not in _nodes_labels[data['kind']]:
_nodes_labels[data['kind']]['nodes'] = {}
if data['kind'] in _nodes_labels:
_nodes_labels[data['kind']]['nodes'][node] = _nodes_labels[data['kind']]['str_func'](node) \
if 'str_func' in _nodes_labels[data['kind']] else _default_str_func(node)
else:
_nodes_labels['default']['nodes'][node] = _nodes_labels['default']['str_func'](node)
for data in _nodes_labels.values():
nx.draw_networkx_labels(gr, pos, labels=data['nodes'],
font_size=data['font_size']
if 'font_size' in data else _default_font_size,
font_weight=data['font_weight']
if 'font_weight' in data else _default_font_weight)
plt.axis('off')
plt.tight_layout()
title = 'Created by execo_g5k.topology \n%s\nAPI commit %s' % \
(gr.graph['date'], gr.graph['api_commit'])
plt.text(0.1, 0, title, transform=ax.transAxes)
return fig
|
gpl-3.0
|
bikong2/scikit-learn
|
sklearn/covariance/tests/test_graph_lasso.py
|
272
|
5245
|
""" Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
|
bsd-3-clause
|
aetilley/scikit-learn
|
benchmarks/bench_sgd_regression.py
|
283
|
5569
|
"""
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
|
bsd-3-clause
|
jinghaomiao/apollo
|
modules/tools/map_gen/plot_path.py
|
3
|
1163
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys
import matplotlib.pyplot as plt
f = open(sys.argv[1], 'r')
xs = []
ys = []
for line in f:
line = line.replace("\n", '')
data = line.split(',')
x = float(data[0])
y = float(data[1])
xs.append(x)
ys.append(y)
f.close()
fig = plt.figure()
ax = plt.subplot2grid((1, 1), (0, 0))
ax.plot(xs, ys, "b-", lw=3, alpha=0.8)
ax.axis('equal')
plt.show()
|
apache-2.0
|
shoyer/xarray
|
xarray/util/print_versions.py
|
1
|
5036
|
"""Utility functions for printing version information."""
import importlib
import locale
import os
import platform
import struct
import subprocess
import sys
def get_sys_info():
"""Returns system information as a dict"""
blob = []
# get full commit hash
commit = None
if os.path.isdir(".git") and os.path.isdir("xarray"):
try:
pipe = subprocess.Popen(
'git log --format="%H" -n 1'.split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
so, _ = pipe.communicate()
except Exception:
pass
else:
if pipe.returncode == 0:
commit = so
try:
commit = so.decode("utf-8")
except ValueError:
pass
commit = commit.strip().strip('"')
blob.append(("commit", commit))
try:
(sysname, _nodename, release, _version, machine, processor) = platform.uname()
blob.extend(
[
("python", sys.version),
("python-bits", struct.calcsize("P") * 8),
("OS", "%s" % (sysname)),
("OS-release", "%s" % (release)),
# ("Version", "%s" % (version)),
("machine", "%s" % (machine)),
("processor", "%s" % (processor)),
("byteorder", "%s" % sys.byteorder),
("LC_ALL", "%s" % os.environ.get("LC_ALL", "None")),
("LANG", "%s" % os.environ.get("LANG", "None")),
("LOCALE", "%s.%s" % locale.getlocale()),
]
)
except Exception:
pass
return blob
def netcdf_and_hdf5_versions():
libhdf5_version = None
libnetcdf_version = None
try:
import netCDF4
libhdf5_version = netCDF4.__hdf5libversion__
libnetcdf_version = netCDF4.__netcdf4libversion__
except ImportError:
try:
import h5py
libhdf5_version = h5py.version.hdf5_version
except ImportError:
pass
return [("libhdf5", libhdf5_version), ("libnetcdf", libnetcdf_version)]
def show_versions(file=sys.stdout):
""" print the versions of xarray and its dependencies
Parameters
----------
file : file-like, optional
print to the given file-like object. Defaults to sys.stdout.
"""
sys_info = get_sys_info()
try:
sys_info.extend(netcdf_and_hdf5_versions())
except Exception as e:
print(f"Error collecting netcdf / hdf5 version: {e}")
deps = [
# (MODULE_NAME, f(mod) -> mod version)
("xarray", lambda mod: mod.__version__),
("pandas", lambda mod: mod.__version__),
("numpy", lambda mod: mod.__version__),
("scipy", lambda mod: mod.__version__),
# xarray optionals
("netCDF4", lambda mod: mod.__version__),
("pydap", lambda mod: mod.__version__),
("h5netcdf", lambda mod: mod.__version__),
("h5py", lambda mod: mod.__version__),
("Nio", lambda mod: mod.__version__),
("zarr", lambda mod: mod.__version__),
("cftime", lambda mod: mod.__version__),
("nc_time_axis", lambda mod: mod.__version__),
("PseudoNetCDF", lambda mod: mod.__version__),
("rasterio", lambda mod: mod.__version__),
("cfgrib", lambda mod: mod.__version__),
("iris", lambda mod: mod.__version__),
("bottleneck", lambda mod: mod.__version__),
("dask", lambda mod: mod.__version__),
("distributed", lambda mod: mod.__version__),
("matplotlib", lambda mod: mod.__version__),
("cartopy", lambda mod: mod.__version__),
("seaborn", lambda mod: mod.__version__),
("numbagg", lambda mod: mod.__version__),
("pint", lambda mod: mod.__version__),
# xarray setup/test
("setuptools", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("conda", lambda mod: mod.__version__),
("pytest", lambda mod: mod.__version__),
# Misc.
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
except Exception:
deps_blob.append((modname, None))
else:
try:
ver = ver_f(mod)
deps_blob.append((modname, ver))
except Exception:
deps_blob.append((modname, "installed"))
print("\nINSTALLED VERSIONS", file=file)
print("------------------", file=file)
for k, stat in sys_info:
print(f"{k}: {stat}", file=file)
print("", file=file)
for k, stat in deps_blob:
print(f"{k}: {stat}", file=file)
if __name__ == "__main__":
show_versions()
|
apache-2.0
|
ashhher3/scikit-learn
|
examples/svm/plot_separating_hyperplane_unbalanced.py
|
329
|
1850
|
"""
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
    such as that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
motazsaad/tweets-collector
|
text2xlsx.py
|
1
|
2050
|
import argparse
import os
import pandas as pd
from pandas import ExcelWriter
parser = argparse.ArgumentParser(description='convert tweets text to excel format')
parser.add_argument('-i', '--infile', type=str,
help='input file (csv format).', required=True)
parser.add_argument('-o', '--outfile', type=str,
help='output file (excel format).', required=True)
parser.add_argument('-d', '--delimiter', type=str,
help='field delimiter.', required=True)
parser.add_argument('-l', '--lines-number', type=int,
help='number of lines per excel file', required=True)
# input - df: a DataFrame, chunk_size: the maximum chunk size
# output - a list of DataFrames
# purpose - splits the DataFrame into chunks of at most chunk_size rows (the last may be smaller)
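# For example (illustrative numbers):
#     chunks = split_dataframe(pd.DataFrame({'a': range(2500)}), chunk_size=1000)
#     [len(c) for c in chunks]   # -> [1000, 1000, 500]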
def split_dataframe(df, chunk_size=1000):
list_of_df = list()
    number_chunks = (len(df) + chunk_size - 1) // chunk_size  # ceiling division avoids an empty trailing chunk
for i in range(number_chunks):
list_of_df.append(df[i*chunk_size:(i+1)*chunk_size])
return list_of_df
def export_text2xlsx(infile, outfile, field_delimiter, number):
df = pd.read_csv(infile, delimiter=field_delimiter, engine='python')
rows_number = df.shape[0]
if rows_number > number:
data_frames = split_dataframe(df, number)
frame_number = 1
for frame in data_frames:
filename, ext = os.path.splitext(outfile)
excel_file = "{}_{}.xlsx".format(filename, frame_number)
writer = ExcelWriter(excel_file, engine='xlsxwriter')
frame.to_excel(writer, 'sheet1')
writer.save()
frame_number += 1
else:
writer = ExcelWriter(outfile)
df.to_excel(writer, 'sheet1')
writer.save()
if __name__ == '__main__':
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
delimiter = args.delimiter
lines_number = args.lines_number
export_text2xlsx(infile, outfile, delimiter, lines_number)
|
apache-2.0
|
kernc/scikit-learn
|
sklearn/manifold/tests/test_locally_linear.py
|
27
|
5247
|
from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
# Test the error raised when parameter passed to lle is invalid
def test_lle_init_parameters():
X = np.random.rand(5, 3)
clf = manifold.LocallyLinearEmbedding(eigen_solver="error")
msg = "unrecognized eigen_solver 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
clf = manifold.LocallyLinearEmbedding(method="error")
msg = "unrecognized method 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
|
bsd-3-clause
|
wesm/arrow
|
python/pyarrow/tests/parquet/test_basic.py
|
1
|
20877
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import io
import numpy as np
import pytest
import pyarrow as pa
from pyarrow import fs
from pyarrow.filesystem import LocalFileSystem
from pyarrow.tests import util
from pyarrow.tests.parquet.common import (_check_roundtrip, _roundtrip_table,
parametrize_legacy_dataset)
try:
import pyarrow.parquet as pq
from pyarrow.tests.parquet.common import _read_table, _write_table
except ImportError:
pq = None
try:
import pandas as pd
import pandas.testing as tm
from pyarrow.tests.pandas_examples import dataframe_with_lists
from pyarrow.tests.parquet.common import alltypes_sample
except ImportError:
pd = tm = None
pytestmark = pytest.mark.parquet
def test_parquet_invalid_version(tempdir):
table = pa.table({'a': [1, 2, 3]})
with pytest.raises(ValueError, match="Unsupported Parquet format version"):
_write_table(table, tempdir / 'test_version.parquet', version="2.2")
with pytest.raises(ValueError, match="Unsupported Parquet data page " +
"version"):
_write_table(table, tempdir / 'test_version.parquet',
data_page_version="2.2")
@parametrize_legacy_dataset
def test_set_data_page_size(use_legacy_dataset):
arr = pa.array([1, 2, 3] * 100000)
t = pa.Table.from_arrays([arr], names=['f0'])
# 128K, 512K
page_sizes = [2 << 16, 2 << 18]
for target_page_size in page_sizes:
_check_roundtrip(t, data_page_size=target_page_size,
use_legacy_dataset=use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_chunked_table_write(use_legacy_dataset):
# ARROW-232
tables = []
batch = pa.RecordBatch.from_pandas(alltypes_sample(size=10))
tables.append(pa.Table.from_batches([batch] * 3))
df, _ = dataframe_with_lists()
batch = pa.RecordBatch.from_pandas(df)
tables.append(pa.Table.from_batches([batch] * 3))
for data_page_version in ['1.0', '2.0']:
for use_dictionary in [True, False]:
for table in tables:
_check_roundtrip(
table, version='2.0',
use_legacy_dataset=use_legacy_dataset,
data_page_version=data_page_version,
use_dictionary=use_dictionary)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_memory_map(tempdir, use_legacy_dataset):
df = alltypes_sample(size=10)
table = pa.Table.from_pandas(df)
_check_roundtrip(table, read_table_kwargs={'memory_map': True},
version='2.0', use_legacy_dataset=use_legacy_dataset)
filename = str(tempdir / 'tmp_file')
with open(filename, 'wb') as f:
_write_table(table, f, version='2.0')
table_read = pq.read_pandas(filename, memory_map=True,
use_legacy_dataset=use_legacy_dataset)
assert table_read.equals(table)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_enable_buffered_stream(tempdir, use_legacy_dataset):
df = alltypes_sample(size=10)
table = pa.Table.from_pandas(df)
_check_roundtrip(table, read_table_kwargs={'buffer_size': 1025},
version='2.0', use_legacy_dataset=use_legacy_dataset)
filename = str(tempdir / 'tmp_file')
with open(filename, 'wb') as f:
_write_table(table, f, version='2.0')
table_read = pq.read_pandas(filename, buffer_size=4096,
use_legacy_dataset=use_legacy_dataset)
assert table_read.equals(table)
@parametrize_legacy_dataset
def test_special_chars_filename(tempdir, use_legacy_dataset):
table = pa.Table.from_arrays([pa.array([42])], ["ints"])
filename = "foo # bar"
path = tempdir / filename
assert not path.exists()
_write_table(table, str(path))
assert path.exists()
table_read = _read_table(str(path), use_legacy_dataset=use_legacy_dataset)
assert table_read.equals(table)
@pytest.mark.slow
def test_file_with_over_int16_max_row_groups():
# PARQUET-1857: Parquet encryption support introduced an INT16_MAX upper
# limit on the number of row groups, but this limit only impacts files with
# encrypted row group metadata because of the int16 row group ordinal used
# in the Parquet Thrift metadata. Unencrypted files are not impacted, so
# this test checks that it works (even if it isn't a good idea)
t = pa.table([list(range(40000))], names=['f0'])
_check_roundtrip(t, row_group_size=1)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_empty_table_roundtrip(use_legacy_dataset):
df = alltypes_sample(size=10)
# Create a non-empty table to infer the types correctly, then slice to 0
table = pa.Table.from_pandas(df)
table = pa.Table.from_arrays(
[col.chunk(0)[:0] for col in table.itercolumns()],
names=table.schema.names)
assert table.schema.field('null').type == pa.null()
assert table.schema.field('null_list').type == pa.list_(pa.null())
_check_roundtrip(
table, version='2.0', use_legacy_dataset=use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_empty_table_no_columns(use_legacy_dataset):
df = pd.DataFrame()
empty = pa.Table.from_pandas(df, preserve_index=False)
_check_roundtrip(empty, use_legacy_dataset=use_legacy_dataset)
@parametrize_legacy_dataset
def test_write_nested_zero_length_array_chunk_failure(use_legacy_dataset):
# Bug report in ARROW-3792
cols = OrderedDict(
int32=pa.int32(),
list_string=pa.list_(pa.string())
)
data = [[], [OrderedDict(int32=1, list_string=('G',)), ]]
# This produces a table with a column like
# <Column name='list_string' type=ListType(list<item: string>)>
# [
# [],
# [
# [
# "G"
# ]
# ]
# ]
#
# Each column is a ChunkedArray with 2 elements
my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()
for batch in data]
my_batches = [pa.RecordBatch.from_arrays(batch, schema=pa.schema(cols))
for batch in my_arrays]
tbl = pa.Table.from_batches(my_batches, pa.schema(cols))
_check_roundtrip(tbl, use_legacy_dataset=use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_multiple_path_types(tempdir, use_legacy_dataset):
# Test compatibility with PEP 519 path-like objects
path = tempdir / 'zzz.parquet'
df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
_write_table(df, path)
table_read = _read_table(path, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
# Test compatibility with plain string paths
path = str(tempdir) + 'zzz.parquet'
df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
_write_table(df, path)
table_read = _read_table(path, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.dataset
@parametrize_legacy_dataset
@pytest.mark.parametrize("filesystem", [
None, fs.LocalFileSystem(), LocalFileSystem._get_instance()
])
def test_relative_paths(tempdir, use_legacy_dataset, filesystem):
# reading and writing from relative paths
table = pa.table({"a": [1, 2, 3]})
# reading
pq.write_table(table, str(tempdir / "data.parquet"))
with util.change_cwd(tempdir):
result = pq.read_table("data.parquet", filesystem=filesystem,
use_legacy_dataset=use_legacy_dataset)
assert result.equals(table)
# writing
with util.change_cwd(tempdir):
pq.write_table(table, "data2.parquet", filesystem=filesystem)
result = pq.read_table(tempdir / "data2.parquet")
assert result.equals(table)
@parametrize_legacy_dataset
def test_read_non_existing_file(use_legacy_dataset):
# ensure we have a proper error message
with pytest.raises(FileNotFoundError):
pq.read_table('i-am-not-existing.parquet')
@parametrize_legacy_dataset
def test_parquet_read_from_buffer(tempdir, use_legacy_dataset):
# reading from a buffer from python's open()
table = pa.table({"a": [1, 2, 3]})
pq.write_table(table, str(tempdir / "data.parquet"))
with open(str(tempdir / "data.parquet"), "rb") as f:
result = pq.read_table(f, use_legacy_dataset=use_legacy_dataset)
assert result.equals(table)
with open(str(tempdir / "data.parquet"), "rb") as f:
result = pq.read_table(pa.PythonFile(f),
use_legacy_dataset=use_legacy_dataset)
assert result.equals(table)
@parametrize_legacy_dataset
def test_byte_stream_split(use_legacy_dataset):
# This is only a smoke test.
arr_float = pa.array(list(map(float, range(100))))
arr_int = pa.array(list(map(int, range(100))))
data_float = [arr_float, arr_float]
table = pa.Table.from_arrays(data_float, names=['a', 'b'])
# Check with byte_stream_split for both columns.
_check_roundtrip(table, expected=table, compression="gzip",
use_dictionary=False, use_byte_stream_split=True)
# Check with byte_stream_split for column 'b' and dictionary
# for column 'a'.
_check_roundtrip(table, expected=table, compression="gzip",
use_dictionary=['a'],
use_byte_stream_split=['b'])
# Check with a collision for both columns.
_check_roundtrip(table, expected=table, compression="gzip",
use_dictionary=['a', 'b'],
use_byte_stream_split=['a', 'b'])
# Check with mixed column types.
mixed_table = pa.Table.from_arrays([arr_float, arr_int],
names=['a', 'b'])
_check_roundtrip(mixed_table, expected=mixed_table,
use_dictionary=['b'],
use_byte_stream_split=['a'])
# Try to use the wrong data type with the byte_stream_split encoding.
# This should throw an exception.
table = pa.Table.from_arrays([arr_int], names=['tmp'])
with pytest.raises(IOError):
_check_roundtrip(table, expected=table, use_byte_stream_split=True,
use_dictionary=False,
use_legacy_dataset=use_legacy_dataset)
@parametrize_legacy_dataset
def test_compression_level(use_legacy_dataset):
arr = pa.array(list(map(int, range(1000))))
data = [arr, arr]
table = pa.Table.from_arrays(data, names=['a', 'b'])
# Check one compression level.
_check_roundtrip(table, expected=table, compression="gzip",
compression_level=1,
use_legacy_dataset=use_legacy_dataset)
# Check another one to make sure that compression_level=1 does not
# coincide with the default one in Arrow.
_check_roundtrip(table, expected=table, compression="gzip",
compression_level=5,
use_legacy_dataset=use_legacy_dataset)
# Check that the user can provide a compression per column
_check_roundtrip(table, expected=table,
compression={'a': "gzip", 'b': "snappy"},
use_legacy_dataset=use_legacy_dataset)
# Check that the user can provide a compression level per column
_check_roundtrip(table, expected=table, compression="gzip",
compression_level={'a': 2, 'b': 3},
use_legacy_dataset=use_legacy_dataset)
# Check that specifying a compression level for a codec which does not
# allow specifying one results in an error.
# Uncompressed, snappy, lz4 and lzo do not support specifying a compression
# level.
# GZIP (zlib) allows for specifying a compression level, but as of zlib
# version 1.2.11 the valid range is [-1, 9].
invalid_combinations = [("snappy", 4), ("lz4", 5), ("gzip", -1337),
("None", 444), ("lzo", 14)]
buf = io.BytesIO()
for (codec, level) in invalid_combinations:
with pytest.raises((ValueError, OSError)):
_write_table(table, buf, compression=codec,
compression_level=level)
def test_sanitized_spark_field_names():
a0 = pa.array([0, 1, 2, 3, 4])
name = 'prohib; ,\t{}'
table = pa.Table.from_arrays([a0], [name])
result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'})
expected_name = 'prohib______'
assert result.schema[0].name == expected_name
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_multithreaded_read(use_legacy_dataset):
df = alltypes_sample(size=10000)
table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(table, buf, compression='SNAPPY', version='2.0')
buf.seek(0)
table1 = _read_table(
buf, use_threads=True, use_legacy_dataset=use_legacy_dataset)
buf.seek(0)
table2 = _read_table(
buf, use_threads=False, use_legacy_dataset=use_legacy_dataset)
assert table1.equals(table2)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_min_chunksize(use_legacy_dataset):
data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])
table = pa.Table.from_pandas(data.reset_index())
buf = io.BytesIO()
_write_table(table, buf, chunk_size=-1)
buf.seek(0)
result = _read_table(buf, use_legacy_dataset=use_legacy_dataset)
assert result.equals(table)
with pytest.raises(ValueError):
_write_table(table, buf, chunk_size=0)
@pytest.mark.pandas
def test_write_error_deletes_incomplete_file(tempdir):
# ARROW-1285
df = pd.DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3, freq='ns')})
pdf = pa.Table.from_pandas(df)
filename = tempdir / 'tmp_file'
try:
_write_table(pdf, filename)
except pa.ArrowException:
pass
assert not filename.exists()
@parametrize_legacy_dataset
def test_read_non_existent_file(tempdir, use_legacy_dataset):
path = 'non-existent-file.parquet'
try:
pq.read_table(path, use_legacy_dataset=use_legacy_dataset)
except Exception as e:
assert path in e.args[0]
@parametrize_legacy_dataset
def test_read_table_doesnt_warn(datadir, use_legacy_dataset):
with pytest.warns(None) as record:
pq.read_table(datadir / 'v0.7.1.parquet',
use_legacy_dataset=use_legacy_dataset)
assert len(record) == 0
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_zlib_compression_bug(use_legacy_dataset):
# ARROW-3514: "zlib deflate failed, output buffer too small"
table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col'])
f = io.BytesIO()
pq.write_table(table, f, compression='gzip')
f.seek(0)
roundtrip = pq.read_table(f, use_legacy_dataset=use_legacy_dataset)
tm.assert_frame_equal(roundtrip.to_pandas(), table.to_pandas())
@parametrize_legacy_dataset
def test_parquet_file_too_small(tempdir, use_legacy_dataset):
path = str(tempdir / "test.parquet")
# TODO(dataset) with datasets API it raises OSError instead
with pytest.raises((pa.ArrowInvalid, OSError),
match='size is 0 bytes'):
with open(path, 'wb') as f:
pass
pq.read_table(path, use_legacy_dataset=use_legacy_dataset)
with pytest.raises((pa.ArrowInvalid, OSError),
match='size is 4 bytes'):
with open(path, 'wb') as f:
f.write(b'ffff')
pq.read_table(path, use_legacy_dataset=use_legacy_dataset)
@pytest.mark.pandas
@pytest.mark.fastparquet
@pytest.mark.filterwarnings("ignore:RangeIndex:FutureWarning")
@pytest.mark.filterwarnings("ignore:tostring:DeprecationWarning:fastparquet")
def test_fastparquet_cross_compatibility(tempdir):
fp = pytest.importorskip('fastparquet')
df = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(4.0, 7.0, dtype="float64"),
"d": [True, False, True],
"e": pd.date_range("20130101", periods=3),
"f": pd.Categorical(["a", "b", "a"]),
# fastparquet writes list as BYTE_ARRAY JSON, so no roundtrip
# "g": [[1, 2], None, [1, 2, 3]],
}
)
table = pa.table(df)
# Arrow -> fastparquet
file_arrow = str(tempdir / "cross_compat_arrow.parquet")
pq.write_table(table, file_arrow, compression=None)
fp_file = fp.ParquetFile(file_arrow)
df_fp = fp_file.to_pandas()
tm.assert_frame_equal(df, df_fp)
# Fastparquet -> arrow
file_fastparquet = str(tempdir / "cross_compat_fastparquet.parquet")
fp.write(file_fastparquet, df)
table_fp = pq.read_pandas(file_fastparquet)
# for fastparquet written file, categoricals comes back as strings
# (no arrow schema in parquet metadata)
df['f'] = df['f'].astype(object)
tm.assert_frame_equal(table_fp.to_pandas(), df)
@parametrize_legacy_dataset
@pytest.mark.parametrize('array_factory', [
lambda: pa.array([0, None] * 10),
lambda: pa.array([0, None] * 10).dictionary_encode(),
lambda: pa.array(["", None] * 10),
lambda: pa.array(["", None] * 10).dictionary_encode(),
])
@pytest.mark.parametrize('use_dictionary', [False, True])
@pytest.mark.parametrize('read_dictionary', [False, True])
def test_buffer_contents(
array_factory, use_dictionary, read_dictionary, use_legacy_dataset
):
# Test that null values are deterministically initialized to zero
# after a roundtrip through Parquet.
# See ARROW-8006 and ARROW-8011.
orig_table = pa.Table.from_pydict({"col": array_factory()})
bio = io.BytesIO()
pq.write_table(orig_table, bio, use_dictionary=True)
bio.seek(0)
read_dictionary = ['col'] if read_dictionary else None
table = pq.read_table(bio, use_threads=False,
read_dictionary=read_dictionary,
use_legacy_dataset=use_legacy_dataset)
for col in table.columns:
[chunk] = col.chunks
buf = chunk.buffers()[1]
assert buf.to_pybytes() == buf.size * b"\0"
def test_parquet_compression_roundtrip(tempdir):
# ARROW-10480: ensure even with nonstandard Parquet file naming
# conventions, writing and then reading a file works. In
# particular, ensure that we don't automatically double-compress
# the stream due to auto-detecting the extension in the filename
table = pa.table([pa.array(range(4))], names=["ints"])
path = tempdir / "arrow-10480.pyarrow.gz"
pq.write_table(table, path, compression="GZIP")
result = pq.read_table(path)
assert result.equals(table)
def test_empty_row_groups(tempdir):
# ARROW-3020
table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0'])
path = tempdir / 'empty_row_groups.parquet'
num_groups = 3
with pq.ParquetWriter(path, table.schema) as writer:
for i in range(num_groups):
writer.write_table(table)
reader = pq.ParquetFile(path)
assert reader.metadata.num_row_groups == num_groups
for i in range(num_groups):
assert reader.read_row_group(i).equals(table)
def test_reads_over_batch(tempdir):
data = [None] * (1 << 20)
data.append([1])
# Large list<int64> with mostly nones and one final
# value. This should force batched reads when
# reading back.
table = pa.Table.from_arrays([data], ['column'])
path = tempdir / 'arrow-11607.parquet'
pq.write_table(table, path)
table2 = pq.read_table(path)
assert table == table2
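# A minimal sketch (added for illustration, not part of the upstream test
# suite): the write/read round-trip pattern that most of the tests above rely
# on, assuming a pathlib.Path-like ``tmp_path`` directory is available.
def _demo_parquet_roundtrip(tmp_path):
    table = pa.table({"a": [1, 2, 3]})
    pq.write_table(table, tmp_path / "demo.parquet")
    assert pq.read_table(tmp_path / "demo.parquet").equals(table)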
|
apache-2.0
|
frank-tancf/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
25
|
25114
|
# Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet, note that glmnet divides the
# objective by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
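# Illustrative sketch (added for clarity, not part of the upstream test file):
# with the default arguments, build_dataset returns an under-determined
# problem with more features than samples; only the output shapes are checked.
def _demo_build_dataset_shapes():
    X, y, X_test, y_test = build_dataset()
    assert X.shape == (50, 200) and X_test.shape == (50, 200)
    assert y.shape == (50,) and y_test.shape == (50,)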
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that the selected alphas are not further apart than
# one position in the clf.alphas_ grid
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_) -
np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=50, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=10, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 10, 3), clf.mse_path_.shape)
assert_equal((2, 10), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((10, 3), clf.mse_path_.shape)
assert_equal(10, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong dtype,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
# With no input checking, providing X in C order should result in incorrect
# computation
X = check_array(X, order='C', dtype='float64')
assert_raises(ValueError, clf.fit, X, y, check_input=False)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
def test_lasso_non_float_y():
X = [[0, 0], [1, 1], [-1, -1]]
y = [0, 1, 2]
y_float = [0.0, 1.0, 2.0]
for model in [ElasticNet, Lasso]:
clf = model(fit_intercept=False)
clf.fit(X, y)
clf_float = model(fit_intercept=False)
clf_float.fit(X, y_float)
assert_array_equal(clf.coef_, clf_float.coef_)
|
bsd-3-clause
|
erh3cq/hyperspy
|
hyperspy/drawing/_markers/vertical_line_segment.py
|
4
|
3486
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class VerticalLineSegment(MarkerBase):
"""Vertical line segment marker that can be added to the signal figure
Parameters
----------
x : array or float
The position of the line segment in x.
If float, the marker is fixed.
If array, the marker will be updated when navigating. The array should
have the same dimensions in the navigation axes.
y1 : array or float
The position of the start of the line segment in y.
see x argument
y2 : array or float
The position of the end of the line segment in y.
see x argument
kwargs :
Keyword arguments corresponding to valid axvline properties (i.e.
recognized by mpl.plot).
Example
-------
>>> im = hs.signals.Signal2D(np.zeros((100, 100)))
>>> m = hs.plot.markers.vertical_line_segment(
>>> x=20, y1=30, y2=70, linewidth=4, color='red', linestyle='dotted')
>>> im.add_marker(m)
Add a marker permanently to a signal
>>> im = hs.signals.Signal2D(np.zeros((60, 60)))
>>> m = hs.plot.markers.vertical_line_segment(x=10, y1=20, y2=50)
>>> im.add_marker(m, permanent=True)
"""
def __init__(self, x, y1, y2, **kwargs):
MarkerBase.__init__(self)
lp = {'color': 'black', 'linewidth': 1}
self.marker_properties = lp
self.set_data(x1=x, y1=y1, y2=y2)
self.set_marker_properties(**kwargs)
self.name = 'vertical_line_segment'
def __repr__(self):
string = "<marker.{}, {} (x={},y1={},y2={},color={})>".format(
self.__class__.__name__,
self.name,
self.get_data_position('x1'),
self.get_data_position('y1'),
self.get_data_position('y2'),
self.marker_properties['color'],
)
return(string)
def update(self):
if self.auto_update is False:
return
self._update_segment()
def _plot_marker(self):
self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties)
self._update_segment()
def _update_segment(self):
segments = self.marker.get_segments()
segments[0][0, 0] = self.get_data_position('x1')
segments[0][1, 0] = segments[0][0, 0]
if self.get_data_position('y1') is None:
segments[0][0, 1] = plt.getp(self.marker.axes, 'ylim')[0]
else:
segments[0][0, 1] = self.get_data_position('y1')
if self.get_data_position('y2') is None:
segments[0][1, 1] = plt.getp(self.marker.axes, 'ylim')[1]
else:
segments[0][1, 1] = self.get_data_position('y2')
self.marker.set_segments(segments)
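# Note for readers (added commentary, grounded in the code above): the marker
# is backed by a single LineCollection segment of shape (2, 2) laid out as
# [[x, y1], [x, y2]], which is exactly what _update_segment() rewrites; when
# y1 or y2 is None the current y-limits of the axes are used instead.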
|
gpl-3.0
|
pythonvietnam/scikit-learn
|
doc/datasets/mldata_fixture.py
|
367
|
1183
|
"""Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
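# Hedged usage sketch (assumption, not part of the original fixture): with the
# mock installed by setup_module(), documentation examples can call e.g.
#   datasets.fetch_mldata('iris', data_home=custom_data_home)
# without any network access; the arrays returned come from the fake payloads
# registered above.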
|
bsd-3-clause
|
mac389/at-risk-agents
|
graphmovie-test.py
|
1
|
2119
|
import igraph
import numpy as np
import matplotlib.pyplot as plt
from GraphMovie import GraphMovie
n = {'nodes':50,'edges':3}
min_sigmoid = 0.5
max_sigmoid = np.exp(1)/(1.+np.exp(1))
sigmoid = lambda value: (1./(1+np.exp(-value))-min_sigmoid)/(max_sigmoid-min_sigmoid)
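# Clarifying note (added, not in the original script): the constants above
# rescale the logistic curve so that inputs in [0, 1] map back onto [0, 1]:
# 1/(1 + exp(-0)) equals min_sigmoid and 1/(1 + exp(-1)) equals max_sigmoid,
# so sigmoid(0.0) == 0.0 and sigmoid(1.0) == 1.0 (up to floating-point error).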
colorify = lambda color: (255*color,0,0)
'''
Create the GraphMovie object.
'''
m = GraphMovie()
g = igraph.Graph.Barabasi(n['nodes'],n['edges'])
m.addGraph(g)
degrees = np.array(g.degree()).astype(float)
degrees /= degrees.max()
degrees = np.array(map(sigmoid, degrees))
for id,node in enumerate(g.vs):
g.vs[id]['color'] = tuple([degrees[id]]*3)
g.vs[id]['label'] = str(id)
m.addGraph(g)
alpha = degrees
timesteps = 10
#--initial conditions
INITIAL = 0
attitudes = np.zeros((n['nodes'],2*timesteps))
attitudes[:,INITIAL] = np.random.random_sample(size=(n['nodes'],))
for t in xrange(1,timesteps):
for agent,info in enumerate(g.vs):
attitudes[agent,t] = (1-alpha[agent])*attitudes[agent,t-1] + alpha[agent]*attitudes[g.neighbors(agent),t-1].mean()
g.vs[agent]['color'] = colorify(attitudes[agent,t].tolist())
m.addGraph(g)
#change the susceptibility of the most tolerant agent
most_tolerant_agent = np.argmax(attitudes[:,-1])
alpha[most_tolerant_agent] = 0
attitudes[most_tolerant_agent,-1] = 1
for t in xrange(timesteps,2*timesteps):
for agent,info in enumerate(g.vs):
attitudes[agent,t] = (1-alpha[agent])*attitudes[agent,t-1] + alpha[agent]*attitudes[g.neighbors(agent),t-1].mean()
g.vs[agent]['color'] = colorify(attitudes[agent,t].tolist())
m.addGraph(g)
'''
Now process the layouts, render the frames, and generate the movie.
'''
m.doMovieLayout()
m.renderMovie()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(attitudes[:,INITIAL],color='r',alpha=0.5,range=(0,1),bins=20,label='Initial')
plt.hold(True)
ax.hist(attitudes[:,timesteps],color='b',alpha=0.5,range=(0,1),bins=20,label='Before polarizer')
ax.hist(attitudes[:,-1],color='k',alpha=0.5,range=(0,1),bins=20,label='After polarizer')
ax.set_ylabel('Frequency')
ax.set_xlabel('Attitude')
plt.legend(frameon=False)
plt.show()
|
mit
|
gigascience/galaxy-genome-diversity
|
tools/dpmix/dpmix_plot.py
|
1
|
14866
|
#!/usr/bin/env python
import os
import sys
import math
import matplotlib as mpl
mpl.use('PDF')
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
################################################################################
def build_chrom_len_dict(dbkey, galaxy_data_index_dir):
chrom_len_root = os.path.join(galaxy_data_index_dir, 'shared/ucsc/chrom')
chrom_len_file = '{0}.len'.format(dbkey)
chrom_len_path = os.path.join(chrom_len_root, chrom_len_file)
chrom_len = {}
try:
with open(chrom_len_path) as fh:
for line in fh:
line = line.rstrip('\r\n')
elems = line.split()
if len(elems) == 2:
chrom = elems[0]
length = int(elems[1])
chrom_len[chrom] = length
except:
pass
return chrom_len
def parse_input_file(input_file):
chroms = []
individuals = []
data = {}
chrom_len = {}
used_states = []
with open(input_file) as fh:
for line in fh:
line = line.strip()
if line:
elems = line.split()
chrom = elems[0]
p1, p2, state = map(int, elems[1:4])
id = elems[4]
if state not in used_states:
used_states.append(state)
if chrom not in chroms:
chroms.append(chrom)
if id not in individuals:
individuals.append(id)
data.setdefault(chrom, {})
data[chrom].setdefault(id, [])
data[chrom][id].append((p1, p2, state))
if p2 > chrom_len.setdefault(chrom, 0):
chrom_len[chrom] = p2
return chroms, individuals, data, chrom_len, used_states
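# Input format note (editor's reading of the parser above, not from upstream
# docs): each non-blank line is whitespace-separated as
#   chrom  start  end  state  individual
# e.g. "chr1 0 50000 2 sample_A"; start/end/state are parsed as integers and
# the largest end seen per chromosome doubles as a fallback chromosome length.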
def check_chroms(chroms, chrom_len, dbkey):
error = 0
for chrom in chroms:
if chrom not in chrom_len:
print >> sys.stderr, "Can't find length for {0} chromosome {1}".format(dbkey, chrom)
error = 1
if error:
sys.exit(1)
def check_data(data, chrom_len, dbkey):
error = 0
for chrom in data:
chrom_beg = 0
chrom_end = chrom_len[chrom]
for individual in data[chrom]:
for p1, p2, state in data[chrom][individual]:
if p1 >= p2:
print >> sys.stderr, "Bad data line: begin >= end: {0} {1} {2} {3} {4}".format(chrom, p1, p2, state, individual)
error = 1
if p1 < chrom_beg or p2 > chrom_end:
print >> sys.stderr, "Bad data line: outside {0} boundaries[{1} - {2}]: {3} {4} {5} {6}".format(dbkey, chrom_beg, chrom_end, chrom, p1, p2, state, individual)
error = 1
if error:
sys.exit(1)
def make_rectangle(p1, p2, color, bottom=0.0, top=1.0):
verts = [
(p1, bottom), # left, bottom
(p1, top), # left, top
(p2, top), # right, top
(p2, bottom), # right, bottom
(0.0, 0.0) # ignored
]
codes = [
Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY
]
path = Path(verts, codes)
return patches.PathPatch(path, facecolor=color, lw=0)
def make_split_rectangle(p1, p2, top_color, bottom_color):
patch1 = make_rectangle(p1, p2, bottom_color, top=0.5)
patch2 = make_rectangle(p1, p2, top_color, bottom=0.5)
return [patch1, patch2]
def make_state_rectangle_2pop(p1, p2, state, chrom, individual):
p1_color = 'r'
p2_color = 'g'
heterochromatin_color = '#c7c7c7'
if state == 0:
return [ make_rectangle(p1, p2, heterochromatin_color) ]
elif state == 1:
return [ make_rectangle(p1, p2, p1_color) ]
elif state == 2:
return [ make_rectangle(p1, p2, p2_color) ]
elif state == 3:
return make_split_rectangle(p1, p2, p1_color, p2_color)
else:
print >> sys.stderr, "Unknown state: {0}: {1} {2} {3} {4}".format(state, chrom, p1, p2, individual)
sys.exit(1)
def make_state_rectangle_3pop(p1, p2, state, chrom, individual):
p1_color = 'r'
p2_color = 'g'
p3_color = 'b'
heterochromatin_color = '#c7c7c7'
if state == 0:
return [ make_rectangle(p1, p2, heterochromatin_color) ]
if state == 1:
return [ make_rectangle(p1, p2, p1_color) ]
if state == 2:
return [ make_rectangle(p1, p2, p2_color) ]
if state == 3:
return [ make_rectangle(p1, p2, p3_color) ]
if state == 4:
return make_split_rectangle(p1, p2, p1_color, p2_color)
if state == 5:
return make_split_rectangle(p1, p2, p1_color, p3_color)
if state == 6:
return make_split_rectangle(p1, p2, p2_color, p3_color)
else:
print >> sys.stderr, "Unknown state: {0}: {1} {2} {3} {4}".format(state, chrom, p1, p2, individual)
sys.exit(1)
def nicenum(num, round=False):
if num == 0:
return 0.0
exp = int(math.floor(math.log10(num)))
f = num / math.pow(10, exp)
if round:
if f < 1.5:
nf = 1.0
elif f < 3.0:
nf = 2.0
elif f < 7.0:
nf = 5.0
else:
nf = 10.0
else:
if f <= 1.0:
nf = 1.0
elif f <= 2.0:
nf = 2.0
elif f <= 5.0:
nf = 5.0
else:
nf = 10.0
return nf * pow(10, exp)
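# Editor's illustrative check (not part of the upstream tool): nicenum() is the
# usual "nice numbers for graph labels" heuristic -- round=True snaps to the
# nearest of 1, 2, 5, 10 times a power of ten, round=False picks the smallest
# such value that is >= num.
def _demo_nicenum():
    assert nicenum(2.5, round=True) == 2.0   # nearest nice value
    assert nicenum(2.5, round=False) == 5.0  # smallest nice value >= 2.5
    assert nicenum(73, round=True) == 100.0  # f = 7.3 >= 7.0, so rounds up to 10 * 10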
def tick_foo(beg, end, loose=False):
ntick = 10
range = nicenum(end - beg, round=False)
d = nicenum(range/(ntick - 1), round=True)
digits = int(math.floor(math.log10(d)))
if loose:
graph_min = math.floor(beg/d) * d
graph_max = math.ceil(end/d) * d
else:
graph_min = beg
graph_max = end
nfrac = max([-1 * digits, 0])
vals = []
stop = graph_max
if loose:
stop = graph_max + (0.5 * d)
x = graph_min
while x <= stop:
vals.append(int(x))
x += d
vals = vals[1:]
# if not loose:
# if vals[-1] < graph_max:
# vals.append(int(graph_max))
labels = []
for val in vals:
labels.append('{0}'.format(int(val/math.pow(10, digits))))
# labels.append('{0:.1f}'.format(vals[-1]/math.pow(10, digits)))
return vals, labels
################################################################################
################################################################################
################################################################################
################################################################################
def space_for_legend(plot_params):
space = 0.0
legend_states = plot_params['legend_states']
if legend_states:
ind_space = plot_params['ind_space']
ind_height = plot_params['ind_height']
space += len(legend_states) * (ind_space + ind_height) - ind_space
return space
################################################################################
def space_for_chroms(plot_params, chroms, individuals, data):
space_dict = {}
chrom_height = plot_params['chrom_height']
ind_space = plot_params['ind_space']
ind_height = plot_params['ind_height']
for chrom in chroms:
space_dict[chrom] = chrom_height
individual_count = 0
for individual in individuals:
if individual in data[chrom]:
individual_count += 1
space_dict[chrom] += individual_count * (ind_space + ind_height)
return space_dict
################################################################################
def make_dpmix_plot(input_dbkey, input_file, output_file, galaxy_data_index_dir, state2name=None, populations=3):
fs_chrom_len = build_chrom_len_dict(input_dbkey, galaxy_data_index_dir)
chroms, individuals, data, chrom_len, used_states = parse_input_file(input_file)
## populate chrom_len
for chrom in chrom_len.keys():
if chrom in fs_chrom_len:
chrom_len[chrom] = fs_chrom_len[chrom]
#check_chroms(chroms, chrom_len, input_dbkey)
check_data(data, chrom_len, input_dbkey)
## plot parameters
plot_params = {
'plot_dpi': 300,
'page_width': 8.50,
'page_height': 11.00,
'top_margin': 0.10,
'bottom_margin': 0.10,
'chrom_space': 0.25,
'chrom_height': 0.25,
'ind_space': 0.10,
'ind_height': 0.25,
'legend_space': 0.10
}
## in the legend, only print out states that are
## 1) in the data
## - AND -
## 2) in the state2name map
legend_states = []
if state2name is not None:
for state in used_states:
if state in state2name:
legend_states.append(state)
plot_params['legend_states'] = legend_states
## choose the correct make_state_rectangle method
if populations == 3:
plot_params['rectangle_method'] = make_state_rectangle_3pop
elif populations == 2:
plot_params['rectangle_method'] = make_state_rectangle_2pop
pdf_pages = PdfPages(output_file)
## generate a list of chroms for each page
needed_for_legend = space_for_legend(plot_params)
needed_for_chroms = space_for_chroms(plot_params, chroms, individuals, data)
chrom_space_per_page = plot_params['page_height']
chrom_space_per_page -= plot_params['top_margin'] + plot_params['bottom_margin']
chrom_space_per_page -= needed_for_legend + plot_params['legend_space']
chrom_space_per_page -= plot_params['chrom_space']
chroms_left = chroms[:]
pages = []
space_left = chrom_space_per_page
chrom_list = []
while chroms_left:
chrom = chroms_left.pop(0)
space_needed = needed_for_chroms[chrom] + plot_params['chrom_space']
if (space_needed > chrom_space_per_page):
print >> sys.stderr, 'Multipage chroms not yet supported'
sys.exit(1)
## sometimes 1.9 - 1.9 < 0 (-4.4408920985e-16)
## so, we make sure it's not more than a millimeter over
if space_left - space_needed > -0.04:
chrom_list.append(chrom)
space_left -= space_needed
else:
pages.append(chrom_list[:])
chrom_list = []
chroms_left.insert(0, chrom)
space_left = chrom_space_per_page
############################################################################
plot_dpi = plot_params['plot_dpi']
page_width = plot_params['page_width']
page_height = plot_params['page_height']
top_margin = plot_params['top_margin']
ind_space = plot_params['ind_space']
ind_height = plot_params['ind_height']
make_state_rectangle = plot_params['rectangle_method']
legend_space = plot_params['legend_space']
chrom_space = plot_params['chrom_space']
chrom_height = plot_params['chrom_height']
for page in pages:
fig = plt.figure(figsize=(page_width, page_height), dpi=plot_dpi)
bottom = 1.0 - (top_margin/page_height)
# print legend
if legend_states:
top = True
for state in sorted(legend_states):
if top:
bottom -= ind_height/page_height
top = False
else:
bottom -= (ind_space + ind_height)/page_height
ax1 = fig.add_axes([0.0, bottom, 0.09, ind_height/page_height])
plt.axis('off')
ax1.set_xlim(0, 1)
ax1.set_ylim(0, 1)
for patch in make_state_rectangle(0, 1, state, 'legend', state2name[state]):
ax1.add_patch(patch)
ax2 = fig.add_axes([0.10, bottom, 0.88, ind_height/page_height], frame_on=False)
plt.axis('off')
plt.text(0.0, 0.5, state2name[state], fontsize=10, ha='left', va='center')
bottom -= legend_space/page_height
# print chroms
top = True
for chrom in page:
length = chrom_len[chrom]
vals, labels = tick_foo(0, length)
if top:
bottom -= chrom_height/page_height
top = False
else:
bottom -= (chrom_space + chrom_height)/page_height
ax = fig.add_axes([0.0, bottom, 1.0, chrom_height/page_height])
plt.axis('off')
plt.text(0.5, 0.5, chrom, fontsize=14, ha='center')
individual_count = 0
for individual in individuals:
if individual in data[chrom]:
individual_count += 1
i = 0
for individual in individuals:
if individual in data[chrom]:
i += 1
bottom -= (ind_space + ind_height)/page_height
ax1 = fig.add_axes([0.0, bottom, 0.09, ind_height/page_height])
plt.axis('off')
plt.text(1.0, 0.5, individual, fontsize=10, ha='right', va='center')
ax2 = fig.add_axes([0.10, bottom, 0.88, ind_height/page_height], frame_on=False)
ax2.set_xlim(0, length)
ax2.set_ylim(0, 1)
if i != individual_count:
plt.axis('off')
else:
ax2.tick_params(top=False, left=False, right=False, labelleft=False)
ax2.set_xticks(vals)
ax2.set_xticklabels(labels)
for p1, p2, state in sorted(data[chrom][individual]):
for patch in make_state_rectangle(p1, p2, state, chrom, individual):
ax2.add_patch(patch)
# extend last state to end of chrom
if p2 < length:
for patch in make_state_rectangle(p2, length, state, chrom, individual):
ax2.add_patch(patch)
pdf_pages.savefig(fig)
plt.close(fig)
pdf_pages.close()
################################################################################
if __name__ == '__main__':
make_dpmix_plot('loxAfr3', 'output.dat', 'output2_files/picture.pdf', '/scratch/galaxy/home/oocyte/galaxy_oocyte/tool-data', state2name={0: 'heterochromatin', 1: 'reference', 2: 'asian'}, populations=2)
# input_dbkey, input_file, output_file, galaxy_data_index_dir = sys.argv[1:5]
# make_dpmix_plot(input_dbkey, input_file, output_file, galaxy_data_index_dir)
sys.exit(0)
## notes
# 1) pass in a state to name mapping
# 2) only print out names for states which exist in the data, and are in the state to name mapping
|
gpl-3.0
|
zihua/scikit-learn
|
sklearn/tests/test_learning_curve.py
|
59
|
10869
|
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score improves (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
|
bsd-3-clause
|
ankurankan/scikit-learn
|
sklearn/learning_curve.py
|
28
|
13300
|
"""Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import _check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
    A cross-validation generator splits the whole dataset k times into training
    and test data. Subsets of the training set with varying sizes will be used
    to train the estimator, and a score for each training subset size and for
    the test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(_check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
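# A minimal usage sketch (not part of the original module; the estimator and
# dataset below are illustrative only):
#
#     >>> from sklearn.datasets import load_digits
#     >>> from sklearn.svm import SVC
#     >>> digits = load_digits()
#     >>> sizes, train_scores, test_scores = learning_curve(
#     ...     SVC(gamma=0.001), digits.data, digits.target,
#     ...     cv=3, train_sizes=[0.3, 0.6, 1.0])
#     >>> sizes.shape[0], train_scores.shape, test_scores.shape
#     (3, (3, 3), (3, 3))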
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
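    # np.split cuts the training fold at every absolute tick; dropping the last
    # chunk pairs each tick with the block of samples added since the previous
    # tick, which is exactly what partial_fit consumes in the loop below.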
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
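# A minimal usage sketch (not part of the original module; names are
# illustrative only):
#
#     >>> from sklearn.datasets import load_iris
#     >>> from sklearn.linear_model import Ridge
#     >>> iris = load_iris()
#     >>> train_scores, test_scores = validation_curve(
#     ...     Ridge(), iris.data, iris.target, param_name="alpha",
#     ...     param_range=[0.1, 1.0, 10.0], cv=3)
#     >>> train_scores.shape, test_scores.shape
#     ((3, 3), (3, 3))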
|
bsd-3-clause
|
btabibian/scikit-learn
|
benchmarks/bench_sgd_regression.py
|
61
|
5612
|
"""
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
            n_iter = int(np.ceil(10 ** 4.0 / n_train))
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
            n_iter = int(np.ceil(10 ** 4.0 / n_train))
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
plt.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("RMSE")
plt.title("Test error - %d features" % list_n_features[j])
i += 1
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("Time [sec]")
plt.title("Training time - %d features" % list_n_features[j])
i += 1
plt.subplots_adjust(hspace=.30)
plt.show()
|
bsd-3-clause
|
karuppayya/zeppelin
|
python/src/main/resources/python/bootstrap_sql.py
|
60
|
1189
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Setup SQL over Pandas DataFrames
# It requires the following dependencies to be installed:
# - pandas
# - pandasql
from __future__ import print_function
try:
from pandasql import sqldf
pysqldf = lambda q: sqldf(q, globals())
except ImportError:
pysqldf = lambda q: print("Can not run SQL over Pandas DataFrame" +
"Make sure 'pandas' and 'pandasql' libraries are installed")
|
apache-2.0
|
Chuban/moose
|
modules/tensor_mechanics/test/tests/capped_drucker_prager/small_deform2.py
|
23
|
3933
|
#!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(scheme, angle_degrees):
angle = angle_degrees * np.pi / 180.0
cohesion = 10
friction_degrees = 20
tip_smoother = 4
mean = -10
friction = friction_degrees * np.pi / 180.0
if (scheme == "native"):
coh = cohesion
fric = friction
elif (scheme == "outer_tip"):
coh = 2 * np.sqrt(3) * cohesion * np.cos(friction) / (3.0 - np.sin(friction))
fric = np.arctan(2 * np.sin(friction) / np.sqrt(3) / (3.0 - np.sin(friction)))
elif (scheme == "inner_tip"):
coh = 2 * np.sqrt(3) * cohesion * np.cos(friction) / (3.0 + np.sin(friction))
fric = np.arctan(2 * np.sin(friction) / np.sqrt(3) / (3.0 + np.sin(friction)))
elif (scheme == "lode_zero"):
coh = cohesion * np.cos(friction)
fric = np.arctan(np.sin(friction) / 3.0)
elif (scheme == "inner_edge"):
coh = 3 * cohesion * np.cos(friction) / np.sqrt(9.0 + 3.0 * np.power(np.sin(friction), 2))
fric = np.arctan(np.sin(friction) / np.sqrt(9.0 + 3.0 * np.power(np.sin(friction), 2)))
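    # The smoothed yield condition assumed by this script is
    # sqrt(bar^2 + tip_smoother^2) + 3*mean*tan(fric) = coh, which gives the
    # octahedral-plane radius computed below.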
bar = np.sqrt(np.power(coh - mean * 3.0 * np.tan(fric), 2) - np.power(tip_smoother, 2))
x = bar * np.cos(angle)
y = bar * np.sin(angle)
return (x, y)
def sigma_mean(stress):
return (stress[0] + stress[3] + stress[5])/3.0
def sigma_bar(stress):
mean = sigma_mean(stress)
return np.sqrt(0.5 * (np.power(stress[0] - mean, 2) + 2*stress[1]*stress[1] + 2*stress[2]*stress[2] + np.power(stress[3] - mean, 2) + 2*stress[4]*stress[4] + np.power(stress[5] - mean, 2)))
def third_inv(stress):
mean = sigma_mean(stress)
return (stress[0] - mean)*(stress[3] - mean)*(stress[5] - mean)
def lode_angle(stress):
bar = sigma_bar(stress)
third = third_inv(stress)
return np.arcsin(-1.5 * np.sqrt(3.0) * third / np.power(bar, 3)) / 3.0
def moose_result(fn):
f = open(fn)
x = []
y = []
for line in f:
if not line.strip():
continue
line = line.strip()
if line.startswith("time") or line.startswith("0"):
continue
        line = list(map(float, line.split(",")))
if line[1] < -1E-10:
continue # this is an elastic deformation
bar = sigma_bar(line[4:])
lode = lode_angle(line[4:])
x.append(bar * np.cos(lode))
y.append(bar * np.sin(lode))
f.close()
return (x, y)
angles = np.arange(-30, 31, 1)
plt.figure()
plt.plot(expected("native", angles)[0], expected("native", angles)[1], 'k-', label = 'expected (native)')
mr = moose_result("gold/small_deform2_native.csv")
plt.plot(mr[0], mr[1], 'k^', label = 'MOOSE (native)')
plt.plot(expected("outer_tip", angles)[0], expected("outer_tip", angles)[1], 'g-', label = 'expected (outer_tip)')
mr = moose_result("gold/small_deform2_outer_tip.csv")
plt.plot(mr[0], mr[1], 'g^', label = 'MOOSE (outer_tip)')
plt.plot(expected("inner_tip", angles)[0], expected("inner_tip", angles)[1], 'b-', label = 'expected (inner_tip)')
mr = moose_result("gold/small_deform2_inner_tip.csv")
plt.plot(mr[0], mr[1], 'b^', label = 'MOOSE (inner_tip)')
plt.plot(expected("lode_zero", angles)[0], expected("lode_zero", angles)[1], 'c-', label = 'expected (lode_zero)')
mr = moose_result("gold/small_deform2_lode_zero.csv")
plt.plot(mr[0], mr[1], 'c^', label = 'MOOSE (lode_zero)')
plt.plot(expected("inner_edge", angles)[0], expected("inner_edge", angles)[1], 'r-', label = 'expected (inner_edge)')
mr = moose_result("gold/small_deform2_inner_edge.csv")
plt.plot(mr[0], mr[1], 'r^', label = 'MOOSE (inner_edge)')
legend = plt.legend(bbox_to_anchor=(1.16, 0.95))
for label in legend.get_texts():
label.set_fontsize('small')
plt.xlabel("Stress")
plt.ylabel("Stress")
plt.title("Drucker-Prager yield function on octahedral plane")
plt.axis([5, 25, -12, 12])
plt.savefig("small_deform2.png")
sys.exit(0)
|
lgpl-2.1
|
effigies/mne-python
|
mne/fixes.py
|
1
|
20716
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
# XXX : copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD
import collections
from operator import itemgetter
import inspect
import warnings
import numpy as np
import scipy
from scipy import linalg, sparse
from math import ceil, log
from numpy.fft import irfft
from scipy.signal import filtfilt as sp_filtfilt
from distutils.version import LooseVersion
from functools import partial
from .externals import six
from .externals.six.moves import copyreg
from gzip import GzipFile
###############################################################################
# Misc
class gzip_open(GzipFile): # python2.6 doesn't have context managing
def __init__(self, *args, **kwargs):
return GzipFile.__init__(self, *args, **kwargs)
def __enter__(self):
if hasattr(GzipFile, '__enter__'):
return GzipFile.__enter__(self)
else:
return self
def __exit__(self, exc_type, exc_value, traceback):
if hasattr(GzipFile, '__exit__'):
return GzipFile.__exit__(self, exc_type, exc_value, traceback)
else:
return self.close()
class _Counter(collections.defaultdict):
"""Partial replacement for Python 2.7 collections.Counter."""
def __init__(self, iterable=(), **kwargs):
super(_Counter, self).__init__(int, **kwargs)
self.update(iterable)
def most_common(self):
return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)
def update(self, other):
"""Adds counts for elements in other"""
if isinstance(other, self.__class__):
for x, n in six.iteritems(other):
self[x] += n
else:
for x in other:
self[x] += 1
try:
Counter = collections.Counter
except AttributeError:
Counter = _Counter
def _unique(ar, return_index=False, return_inverse=False):
"""A replacement for the np.unique that appeared in numpy 1.4.
While np.unique existed long before, keyword return_inverse was
only added in 1.4.
"""
try:
ar = ar.flatten()
except AttributeError:
if not return_inverse and not return_index:
items = sorted(set(ar))
return np.asarray(items)
else:
ar = np.asarray(ar).flatten()
if ar.size == 0:
if return_inverse and return_index:
return ar, np.empty(0, np.bool), np.empty(0, np.bool)
elif return_inverse or return_index:
return ar, np.empty(0, np.bool)
else:
return ar
if return_inverse or return_index:
perm = ar.argsort()
aux = ar[perm]
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if return_inverse:
iflag = np.cumsum(flag) - 1
iperm = perm.argsort()
if return_index:
return aux[flag], perm[flag], iflag[iperm]
else:
return aux[flag], iflag[iperm]
else:
return aux[flag], perm[flag]
else:
ar.sort()
flag = np.concatenate(([True], ar[1:] != ar[:-1]))
return ar[flag]
if LooseVersion(np.__version__) < LooseVersion('1.5'):
unique = _unique
else:
unique = np.unique
def _bincount(X, weights=None, minlength=None):
"""Replacing np.bincount in numpy < 1.6 to provide minlength."""
result = np.bincount(X, weights)
if minlength is None or len(result) >= minlength:
return result
out = np.zeros(minlength, np.int)
out[:len(result)] = result
return out
if LooseVersion(np.__version__) < LooseVersion('1.6'):
bincount = _bincount
else:
bincount = np.bincount
def _copysign(x1, x2):
"""Slow replacement for np.copysign, which was introduced in numpy 1.4"""
return np.abs(x1) * np.sign(x2)
if not hasattr(np, 'copysign'):
copysign = _copysign
else:
copysign = np.copysign
def _in1d(ar1, ar2, assume_unique=False):
"""Replacement for in1d that is provided for numpy >= 1.4"""
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
equal_adj = (sar[1:] == sar[:-1])
flag = np.concatenate((equal_adj, [False]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
if not hasattr(np, 'in1d'):
in1d = _in1d
else:
in1d = np.in1d
def _digitize(x, bins, right=False):
"""Replacement for digitize with right kwarg (numpy < 1.7).
Notes
-----
This fix is only meant for integer arrays. If ``right==True`` but either
``x`` or ``bins`` are of a different type, a NotImplementedError will be
raised.
"""
if right:
x = np.asarray(x)
bins = np.asarray(bins)
if (x.dtype.kind not in 'ui') or (bins.dtype.kind not in 'ui'):
raise NotImplementedError("Only implemented for integer input")
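        # Shifting the integer inputs down by a tiny epsilon makes each bin's
        # right edge inclusive, emulating numpy >= 1.7's right=True behaviour.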
return np.digitize(x - 1e-5, bins)
else:
return np.digitize(x, bins)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
digitize = _digitize
else:
digitize = np.digitize
def _tril_indices(n, k=0):
"""Replacement for tril_indices that is provided for numpy >= 1.4"""
mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
indices = np.where(mask)
return indices
if not hasattr(np, 'tril_indices'):
tril_indices = _tril_indices
else:
tril_indices = np.tril_indices
def _unravel_index(indices, dims):
"""Add support for multiple indices in unravel_index that is provided
for numpy >= 1.4"""
indices_arr = np.asarray(indices)
if indices_arr.size == 1:
return np.unravel_index(indices, dims)
else:
if indices_arr.ndim != 1:
raise ValueError('indices should be one dimensional')
ndims = len(dims)
unraveled_coords = np.empty((indices_arr.size, ndims), dtype=np.int)
for coord, idx in zip(unraveled_coords, indices_arr):
coord[:] = np.unravel_index(idx, dims)
return tuple(unraveled_coords.T)
if LooseVersion(np.__version__) < LooseVersion('1.4'):
unravel_index = _unravel_index
else:
unravel_index = np.unravel_index
def _qr_economic_old(A, **kwargs):
"""
Compat function for the QR-decomposition in economic mode
Scipy 0.9 changed the keyword econ=True to mode='economic'
"""
with warnings.catch_warnings(record=True):
return linalg.qr(A, econ=True, **kwargs)
def _qr_economic_new(A, **kwargs):
return linalg.qr(A, mode='economic', **kwargs)
if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
qr_economic = _qr_economic_old
else:
qr_economic = _qr_economic_new
def savemat(file_name, mdict, oned_as="column", **kwargs):
"""MATLAB-format output routine that is compatible with SciPy 0.7's.
0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default
value. It issues a warning if this is not provided, stating that "This will
change to 'row' in future versions."
"""
import scipy.io
try:
return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)
except TypeError:
return scipy.io.savemat(file_name, mdict, **kwargs)
if hasattr(np, 'count_nonzero'):
from numpy import count_nonzero
else:
def count_nonzero(X):
return len(np.flatnonzero(X))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
# wrap filtfilt, excluding padding arguments
def _filtfilt(*args, **kwargs):
# cut out filter args
if len(args) > 4:
args = args[:4]
if 'padlen' in kwargs:
del kwargs['padlen']
return sp_filtfilt(*args, **kwargs)
if 'padlen' not in inspect.getargspec(sp_filtfilt)[0]:
filtfilt = _filtfilt
else:
filtfilt = sp_filtfilt
###############################################################################
# Back porting firwin2 for older scipy
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s'
% (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from scipy.signal.signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
if hasattr(scipy.signal, 'firwin2'):
from scipy.signal import firwin2
else:
firwin2 = _firwin2
###############################################################################
# Back porting matrix_rank for numpy < 1.7
def _matrix_rank(M, tol=None):
""" Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that
are greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for
linear least squares [2].
This default threshold is designed to detect rank deficiency accounting
for the numerical errors of the SVD computation. Imagine that there is a
column in `M` that is an exact (in floating point) linear combination of
other columns in `M`. Computing the SVD on `M` will not produce a
singular value exactly equal to 0 in general: any difference of the
smallest SVD value from 0 will be caused by numerical imprecision in the
calculation of the SVD. Our threshold for small SVD values takes this
numerical imprecision into account, and the default threshold will detect
such numerical rank deficiency. The threshold may declare a matrix `M`
rank deficient even if the linear combination of some columns of `M` is
not exactly equal to another column of `M` but only numerically very
close to another column of `M`.
We chose our default threshold because it is in wide use. Other
thresholds are possible. For example, elsewhere in the 2007 edition of
*Numerical recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance
values to detect *effective* rank deficiency. The most useful measure of
the tolerance depends on the operations you intend to use on your matrix.
For example, if your data come from uncertain measurements with
uncertainties greater than floating point epsilon, choosing a tolerance
near that uncertainty may be preferable. The tolerance may be absolute if
the uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = np.asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return np.int(not all(M == 0))
S = np.linalg.svd(M, compute_uv=False)
if tol is None:
tol = S.max() * np.max(M.shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
if LooseVersion(np.__version__) > '1.7.1':
from numpy.linalg import matrix_rank
else:
matrix_rank = _matrix_rank
def _reconstruct_partial(func, args, kwargs):
"""Helper to pickle partial functions"""
return partial(func, *args, **(kwargs or {}))
def _reduce_partial(p):
"""Helper to pickle partial functions"""
return _reconstruct_partial, (p.func, p.args, p.keywords)
# This adds pickling functionality to older Python 2.6
# Please always import partial from here.
copyreg.pickle(partial, _reduce_partial)
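# Illustrative check (not part of the original module): with the registration
# above, pickle.loads(pickle.dumps(partial(max, 0)))(3) returns 3 even on
# Python 2.6, where functools.partial objects are not picklable by default.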
def normalize_colors(vmin, vmax, clip=False):
"""Helper to handle matplotlib API"""
import matplotlib.pyplot as plt
if 'Normalize' in vars(plt):
return plt.Normalize(vmin, vmax, clip=clip)
else:
return plt.normalize(vmin, vmax, clip=clip)
def _assert_true(expr, msg):
"""Fake assert_true without message"""
assert expr
try:
from nose.tools import assert_true
except ImportError:
assert_true = _assert_true
def _assert_is(expr1, expr2, msg=None):
"""Fake assert_is without message"""
    assert_true(expr1 is expr2, msg)
def _assert_is_not(expr1, expr2, msg=None):
"""Fake assert_is_not without message"""
    assert_true(expr1 is not expr2, msg)
try:
from nose.tools import assert_is, assert_is_not
except ImportError:
assert_is = _assert_is
assert_is_not = _assert_is_not
def _sparse_block_diag(mats, format=None, dtype=None):
"""An implementation of scipy.sparse.block_diag since old versions of
scipy don't have it. Forms a sparse matrix by stacking matrices in block
diagonal form.
Parameters
----------
mats : list of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the
matrix is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of blocks.
Returns
-------
res : sparse matrix
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None] * nmat
row[ia] = a
rows.append(row)
return sparse.bmat(rows, format=format, dtype=dtype)
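# Example (a sketch): stacking a 2x2 identity and a 1x1 block yields a 3x3
# block-diagonal sparse matrix.
#
#     >>> import numpy as np
#     >>> _sparse_block_diag([np.eye(2), np.ones((1, 1))]).toarray().shape
#     (3, 3)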
try:
from scipy.sparse import block_diag as sparse_block_diag
except Exception:
sparse_block_diag = _sparse_block_diag
|
bsd-3-clause
|
drodarie/nest-simulator
|
extras/ConnPlotter/colormaps.py
|
21
|
6941
|
# -*- coding: utf-8 -*-
#
# colormaps.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
Colormaps for ConnPlotter.
Provides the following functions and colormaps:
- make_colormap: based on color specification, create colormap
  running from white to the fully saturated color
- redblue: from fully saturated red to white to fully saturated blue
- bluered: from fully saturated blue to white to fully saturated red
For all colormaps, "bad" values (NaN) are mapped to bad_color (a light
yellow, near white).
Provides also ZeroCenterNorm, mapping negative values to 0..0.5,
positive to 0.5..1.
"""
# ----------------------------------------------------------------------------
import matplotlib.pyplot as plt
import matplotlib.colors as mc
import matplotlib.cbook as cbook
import numpy as np
__all__ = ['ZeroCenterNorm', 'make_colormap', 'redblue', 'bluered',
'bad_color']
# ----------------------------------------------------------------------------
bad_color = (1.0, 1.0, 0.9)
# ----------------------------------------------------------------------------
class ZeroCenterNorm(mc.Normalize):
"""
Normalize so that value 0 is always at 0.5.
Code from matplotlib.colors.Normalize.
Copyright (c) 2002-2009 John D. Hunter; All Rights Reserved
http://matplotlib.sourceforge.net/users/license.html
"""
# ------------------------------------------------------------------------
def __call__(self, value, clip=None):
"""
Normalize given values to [0,1].
Returns data in same form as passed in.
value can be scalar or array.
"""
if clip is not None and clip is not False:
assert (False) # clip not supported
if cbook.iterable(value):
vtype = 'array'
val = np.ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = np.ma.array([value]).astype(np.float)
self.autoscale_None(val)
self.vmin = min(0, self.vmin)
self.vmax = max(0, self.vmax)
# imshow expects masked arrays
# fill entire array with 0.5
result = np.ma.array(0.5 * np.ma.asarray(np.ones(np.shape(val))),
dtype=np.float, mask=val.mask)
# change values != 0
result[val < 0] = 0.5 * (self.vmin - val[val < 0]) / self.vmin
result[val > 0] = 0.5 + 0.5 * val[val > 0] / self.vmax
if vtype == 'scalar':
result = result[0]
return result
# ------------------------------------------------------------------------
def inverse(self, value):
"""
Invert color map. Required by colorbar().
"""
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = np.asarray(value)
res = np.zeros(np.shape(val))
res[val < 0.5] = vmin - 2 * vmin * val[val < 0.5]
res[val > 0.5] = 2 * (val[val > 0.5] - 0.5) * vmax
return res
else:
if value == 0.5:
return 0
elif value < 0.5:
return vmin - 2 * vmin * value # vmin < 0
else:
return 2 * (value - 0.5) * vmax
# ----------------------------------------------------------------------------
def make_colormap(color):
"""
Create LinearSegmentedColormap ranging from white to the given color.
Color can be given in any legal color format. Bad color is set to white.
"""
try:
r, g, b = mc.colorConverter.to_rgb(color)
    except Exception:
        raise ValueError('Illegal color specification: %r' % (color,))
cm = mc.LinearSegmentedColormap(color.__str__(),
{'red': [(0.0, 1.0, 1.0),
(1.0, r, r)],
'green': [(0.0, 1.0, 1.0),
(1.0, g, g)],
'blue': [(0.0, 1.0, 1.0),
(1.0, b, b)]})
cm.set_bad(color=bad_color) # light yellow
return cm
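# Example (a sketch): a white-to-red map returns white at 0.0 and the fully
# saturated color at 1.0.
#
#     >>> cm_red = make_colormap('red')
#     >>> cm_red(0.0)[:3], cm_red(1.0)[:3]
#     ((1.0, 1.0, 1.0), (1.0, 0.0, 0.0))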
# ----------------------------------------------------------------------------
redblue = mc.LinearSegmentedColormap('redblue',
{'red': [(0.0, 0.0, 1.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)]})
redblue.set_bad(color=bad_color)
# ----------------------------------------------------------------------------
bluered = mc.LinearSegmentedColormap('bluered',
{'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 1.0, 1.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)]})
bluered.set_bad(color=bad_color)
# ----------------------------------------------------------------------------
if __name__ == '__main__':
# this should be proper unit tests
n1 = ZeroCenterNorm()
if (n1([-1, -0.5, 0.0, 0.5, 1.0]).data == np.array(
[0, 0.25, 0.5, 0.75, 1.0])).all():
print("n1 ok")
else:
print("n1 failed.")
n2 = ZeroCenterNorm(-1, 2)
if (n2([-1, -0.5, 0.0, 1.0, 2.0]).data == np.array(
[0, 0.25, 0.5, 0.75, 1.0])).all():
print("n2 ok")
else:
print("n2 failed.")
|
gpl-2.0
|
maxlikely/scikit-learn
|
sklearn/linear_model/tests/test_base.py
|
8
|
3585
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD Style.
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
"""
Test LinearRegression on a simple dataset.
"""
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
"""
Test assertions on betas shape.
"""
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
|
bsd-3-clause
|
shenzebang/scikit-learn
|
sklearn/cross_decomposition/tests/test_pls.py
|
215
|
11427
|
import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
    # 2 latent vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Let's check that PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
|
bsd-3-clause
|
CodeMonkeyJan/hyperspy
|
hyperspy/tests/drawing/test_plot_signal1d.py
|
1
|
6027
|
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import scipy.misc
import pytest
import matplotlib.pyplot as plt
import os
from shutil import copyfile
import hyperspy.api as hs
from hyperspy.misc.test_utils import update_close_figure
from hyperspy.tests.drawing.test_plot_signal import _TestPlot
scalebar_color = 'blue'
default_tol = 2.0
baseline_dir = 'plot_signal1d'
style_pytest_mpl = 'default'
style = ['default', 'overlap', 'cascade', 'mosaic', 'heatmap']
def _generate_filename_list(style):
path = os.path.dirname(__file__)
filename_list = ['test_plot_spectra_%s' % s for s in style]
filename_list2 = []
for filename in filename_list:
for i in range(0, 4):
filename_list2.append(os.path.join(path, baseline_dir,
'%s%i.png' % (filename, i)))
return filename_list2
class TestPlotSpectra():
s = hs.signals.Signal1D(scipy.misc.ascent()[100:160:10])
@classmethod
def setup_class(cls):
# duplicate baseline images to match the test_name when the
# parametrized 'test_plot_spectra' are run. For a same 'style', the
# expected images are the same.
if pytest.config.getoption("--mpl-generate-path") is None:
for filename in _generate_filename_list(style):
copyfile("%s.png" % filename[:-5], filename)
@classmethod
def teardown_class(cls):
# Create the baseline images: copy one baseline image for each test
# and remove the other ones.
if pytest.config.getoption("--mpl-generate-path"):
for filename in _generate_filename_list(style):
copyfile(filename, "%s.png" % filename[:-5])
# Delete the images that have been created in 'setup_class'
for filename in _generate_filename_list(style):
os.remove(filename)
def _generate_parameters(style):
parameters = []
for s in style:
for fig in [True, None]:
for ax in [True, None]:
parameters.append([s, fig, ax])
return parameters
def _generate_ids(style, duplicate=4):
ids = []
for s in style:
ids.extend([s] * duplicate)
return ids
@pytest.mark.parametrize(("style", "fig", "ax"),
_generate_parameters(style),
ids=_generate_ids(style))
@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir,
tolerance=default_tol, style=style_pytest_mpl)
def test_plot_spectra(self, mpl_cleanup, style, fig, ax):
if fig:
fig = plt.figure()
if ax:
fig = plt.figure()
ax = fig.add_subplot(111)
ax = hs.plot.plot_spectra(self.s, style=style, legend='auto',
fig=fig, ax=ax)
if style == 'mosaic':
ax = ax[0]
return ax.figure
@pytest.mark.parametrize("figure", ['1nav', '1sig', '2nav', '2sig'])
@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir,
tolerance=default_tol, style=style_pytest_mpl)
def test_plot_spectra_sync(self, mpl_cleanup, figure):
s1 = hs.signals.Signal1D(scipy.misc.face()).as_signal1D(0).inav[:, :3]
s2 = s1.deepcopy() * -1
hs.plot.plot_signals([s1, s2])
        if figure == '1nav':
            return s1._plot.navigator_plot.figure
        if figure == '1sig':
            return s1._plot.signal_plot.figure
        if figure == '2nav':
            return s2._plot.navigator_plot.figure
        if figure == '2sig':
            return s2._plot.signal_plot.figure
@update_close_figure
def test_plot_nav0_close():
test_plot = _TestPlot(ndim=0, sdim=1)
test_plot.signal.plot()
return test_plot.signal
@update_close_figure
def test_plot_nav1_close():
test_plot = _TestPlot(ndim=1, sdim=1)
test_plot.signal.plot()
return test_plot.signal
@update_close_figure
def test_plot_nav2_close():
test_plot = _TestPlot(ndim=2, sdim=1)
test_plot.signal.plot()
return test_plot.signal
def _test_plot_two_cursors(ndim):
test_plot = _TestPlot(ndim=ndim, sdim=1) # sdim=2 not supported
test_plot.signal.plot()
s = test_plot.signal
s.metadata.General.title = 'Nav %i, Sig 1, two cursor' % ndim
s.axes_manager[0].index = 4
s.plot()
s._plot.add_right_pointer()
s._plot.right_pointer.axes_manager[0].index = 2
if ndim == 2:
s.axes_manager[1].index = 2
s._plot.right_pointer.axes_manager[1].index = 3
return s
def _generate_parameter():
parameters = []
for ndim in [1, 2]:
for plot_type in ['nav', 'sig']:
parameters.append([ndim, plot_type])
return parameters
@pytest.mark.parametrize(("ndim", "plot_type"),
_generate_parameter())
@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir,
tolerance=default_tol, style=style_pytest_mpl)
def test_plot_two_cursors(mpl_cleanup, ndim, plot_type):
s = _test_plot_two_cursors(ndim=ndim)
if plot_type == "sig":
return s._plot.signal_plot.figure
else:
return s._plot.navigator_plot.figure
@update_close_figure
def test_plot_nav2_sig1_two_cursors_close():
return _test_plot_two_cursors(ndim=2)
|
gpl-3.0
|
braghiere/Thesis
|
chapter4/Minimising/invertJulesRT_new.py
|
1
|
3853
|
#!/usr/bin/env python
import sys
import os
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
from runJulesRTStruct import runJulesRTStruct
class julesRTData():
def __init__(self):
self.lai=float()
self.leafT=float()
self.leafR=float()
self.soilR=float()
self.sza=float()
self.obsUncert=float()
self.obsVal=float()
self.diffuse=False
        self.uniform=True
self.obsType="fapar"
def julesRT_wrapper( argList, data ):
"""Call the Jules 2S model using a list variable
Also implement any specific relationships between
variables here (e.g. leaf_r=leaf_t)
argList is stuff to be minimised (i.e. structural parameters)
controlVars are other arguments required to be passed
szaList are the solar angles at which to evaluate the model
"""
astruc=argList[0]
bstruc=argList[1]
julesReturn=runJulesRTStruct(
astruc=astruc,
bstruc=bstruc,
lai=data.lai,
leafT=data.leafT,
leafR=data.leafR,
soilR=data.soilR,
sza=data.sza,
diffuse=data.diffuse,
uniform=data.uniform)
if data.obsType=='fapar':
return julesReturn[0]
else:
return julesReturn[1]
def costFunction( params, controlData, Xprior=None, Xuncert=None ):
"""Var-type cost function for JULES
"""
n=len(controlData)
Ymodel=np.zeros(n)
Yobs=np.zeros(n)
R=np.zeros((n,n))
#compute the modelled albedo/fAPAR values
for (i,data) in enumerate(controlData):
Ymodel[i]=julesRT_wrapper( params, data )
Yobs[i]=data.obsVal
R[i,i]=1./(data.obsUncert**2)
#VAR term one (obs):
diff=Ymodel-Yobs
cost=0.5*np.dot(diff,np.dot(R,diff.T))
    if Xprior is not None:
#compute B matrix
B=np.diag(1./(np.array(Xuncert)**2))
#VAR term two:
diff=np.array(params)-np.array(Xprior)
cost+=0.5*np.dot(diff,np.dot(B,diff.T))
return cost
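# Note added for clarity (not in the original source): costFunction() above
# computes the standard variational objective
#     J(p) = 0.5*(y(p) - y_obs)^T R (y(p) - y_obs)
#          + 0.5*(p - p_prior)^T B (p - p_prior)
# where, in this implementation, R and B are diagonal matrices holding
# 1/uncertainty**2, i.e. the inverse observation and prior error covariances.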
def fminJulesRT( initParams, controls, Xprior=None, Xuncert=None ):
'''Run the chosen minimisers over the data
'''
xOpt=opt.fmin( costFunction, initParams, args=(controls, Xprior, Xuncert ), disp=True, maxfun=10000)
return xOpt
def solveJulesStruct(controlData, initParams=np.array([0.0,0.0])):
''' An example function for running the minimiser
'''
ret=fminJulesRT( initParams, controlData, Xprior=None, Xuncert=None )
print ret
return ret
def addTwinObs(controlData,astruc=1.0,bstruc=0.0):
'''Add dummy (or "twin") observations into the control
data given a value for the structure parameters
'''
for (i,data) in enumerate(controlData):
controlData[i].obsVal=julesRT_wrapper( [astruc,bstruc], data )
#print controlData[i].obsVal
if __name__=="__main__":
controlData=[]
controlData.append(julesRTData())
controlData[-1].lai=2.0
controlData[-1].leafT=0.1
controlData[-1].leafR=0.15
controlData[-1].soilR=0.05
controlData[-1].sza=30.
controlData[-1].diffuse=False
controlData[-1].uniform=True
controlData[-1].obsType='fapar'
controlData[-1].obsVal=0.0
controlData[-1].obsUncert=1.0
controlData.append(julesRTData())
controlData[-1].lai=2.0
controlData[-1].leafT=0.1
controlData[-1].leafR=0.15
controlData[-1].soilR=0.05
controlData[-1].sza=10.
controlData[-1].diffuse=False
controlData[-1].uniform=True
controlData[-1].obsType='fapar'
controlData[-1].obsVal=0.0
controlData[-1].obsUncert=1.0
controlData.append(julesRTData())
controlData[-1].lai=2.0
controlData[-1].leafT=0.1
controlData[-1].leafR=0.15
controlData[-1].soilR=0.05
controlData[-1].sza=60.
controlData[-1].diffuse=False
controlData[-1].uniform=True
controlData[-1].obsType='fapar'
controlData[-1].obsVal=0.0
controlData[-1].obsUncert=1.0
addTwinObs(controlData,astruc=1.0,bstruc=0.0)
solveJulesStruct(controlData)
|
apache-2.0
|
xzh86/scikit-learn
|
examples/mixture/plot_gmm_pdf.py
|
284
|
1528
|
"""
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
ucloud/uai-sdk
|
examples/caffe/train/rfcn/code/tools/train_svms.py
|
16
|
13480
|
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
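    # Illustrative note (added; not in the original file): if the average
    # feature norm over the sampled images were 40.0, the returned scale
    # would be TARGET_NORM / 40.0 = 0.5, so rescaled features end up with an
    # average L2 norm of 20.0.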
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
# Initialize SVMs in a smart way. Not doing this because its such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
# Sanity check
scores_ret = (
X * 1.0 / self.feature_scale).dot(w.T * self.feature_scale) + b
assert np.allclose(scores, scores_ret[:, 0], atol=1e-5), \
"Scores from returned model don't match decision function"
return ((w * self.feature_scale, b), pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
|
apache-2.0
|
Tatsuonline/Dark-Matter-Research
|
pulse-gradient-analysis.py
|
1
|
6258
|
#! /usr/bin/env python3
#
# Author: Tatsu
import sys
import os
import math
import re
import matplotlib.pyplot
import pylab
import time
if len(sys.argv) < 2:
print("Usage: ", sys.argv[0], "<file>")
sys.exit(1)
if not os.path.exists(sys.argv[1]):
print("%s does not exist" % (sys.argv[1],))
sys.exit(1)
if not os.path.isfile(sys.argv[1]):
print("%s is not an ordinary file" % (sys.argv[1],))
sys.exit(1)
if not os.access(sys.argv[1], os.R_OK):
print("%s is not readable" % (sys.argv[1],))
sys.exit(1)
class PulseGradientAnalysis:
def __init__(self):
self.startTime = time.time() # Starts timing the program
self.sampleNumber = 24 # Number of samples
self.sampleIndex = 0 # Sample index value
self.maximumVoltage = 0 # The largest value for voltage
self.maximumVoltageIndex = 0 # Stores the index value of the largest voltage
self.maximumAmplitudesArray = [] # Stores all the maximum amplitudes for each event for plotting
self.sampleAmplitudesArray = [] # Stores all the sample amplitudes for each event for plotting
self.eventCounter = 0 # Counts the number of events
self.eventChecker = 0 # Used to determine if voltage after max stays above baseline- in which case it is discarded
self.eventIndexEnd = 0 # Used to determine if voltage after max stays above baseline- in which case it is discarded
self.dataList = []
def process_data(self):
with open(sys.argv[1], "r") as fileInput:
for line in fileInput:
self.event = re.search("Event", line) # Utilizes regex to determine when an event has taken place
if self.event:
self.eventCounter += 1 # Increments event counter
if self.eventCounter > 1:
self.event_processor()
self.baselineCounter = 0 # Sets the baselineCounter back to 0 for a new event
self.baselineArray = [] # Array to hold the first 20 points
self.baselineCounter = 0 # Ensures that 20 points are taken for the baselineArray
self.baselineVoltage = 0 # The average of the baselineArray
self.averageTemporary = 0 # A temporary variable used for adding the values in baselineArray
self.i = 0 # Usage in loops
self.voltageArray = [] # Stores all the voltage values in an array
else:
self.dataList = line.split(",") # Splits the data in each line by the comma
if len(self.dataList) == 1: # Checks for empty lines stored as '\n'
next(fileInput) # Skips the line
self.dataList = line.split(",") # Splits the data in each line by the comma
self.voltage = abs(float(self.dataList[1])) # Takes the second data value stored in the list
self.voltageArray.append(self.voltage) # Adds a new voltage data value to the array
if self.baselineCounter != 21: # To ensure that 20 points are taken for the baselineArray
self.baselineArray.append(self.voltage) # Adds a new voltage data value to the array
self.baselineCounter += 1 # Increments the baselineCounter
else:
while self.i != 20: # Goes from i = 0 to i = 19
self.averageTemporary = self.averageTemporary + float(self.baselineArray[self.i]) # Converts the values in the baselineArray into integers and adds them up
self.i += 1 # Increments i
self.baselineVoltage = self.averageTemporary / 20 # Takes the average of the baselineArray
self.event_processor()
def event_processor(self):
self.maximumVoltage = max(self.voltageArray) # Finds the largest value in the array and sets it as maximumVoltage
self.maximumVoltageIndex = self.voltageArray.index(self.maximumVoltage) # Finds the index value of maximumVoltage
self.maximumAmplitudesArray.append(float(self.maximumVoltage) - self.baselineVoltage) # Stores all the amplitudes for each event for plotting
self.sampleIndex = self.maximumVoltageIndex + self.sampleNumber # Calculates the index value of the sample
if self.sampleIndex > len(self.voltageArray): # Ensures that the sample index does not exceed the size of the array
self.maximumAmplitudesArray.pop() # Removes the last value in the array
else:
self.sampleAmplitudesArray.append(float(self.voltageArray[self.sampleIndex]) - self.baselineVoltage) # Stores all the sample amplitudes for each event for plotting
self.eventChecker = self.maximumVoltageIndex # Sets the starting point to be the max voltage
self.eventIndexEnd = (len(self.voltageArray) - 1) # Sets the end point to be the end of the array
self.usefulEvent = 0
while self.eventChecker < self.eventIndexEnd:
if self.voltageArray[self.eventChecker] <= self.baselineVoltage: # Checks if the event is useful
self.usefulEvent = 1
self.eventChecker = self.eventIndexEnd # Breaks the while loop
self.eventChecker += 1 # Increments the index
if self.usefulEvent == 0:
self.maximumAmplitudesArray.pop()
self.sampleAmplitudesArray.pop()
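    # Added summary (not in the original file): event_processor() records, for
    # each event, the peak amplitude above baseline and the amplitude
    # sampleNumber points after the peak; events whose trace never drops back
    # to the baseline after the peak, or whose sample index falls outside the
    # trace, are discarded. display_data() then scatters these (peak, sample)
    # pairs.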
def display_data(self):
matplotlib.pyplot.scatter(self.maximumAmplitudesArray, self.sampleAmplitudesArray) # Plots the maximum amplitudes on x-axis and sample amplitudes on y-axis
matplotlib.pyplot.title('Pulse Gradient Analysis Plot (N = %d)' % self.sampleNumber)
matplotlib.pyplot.xlabel('Peak Amplitudes')
matplotlib.pyplot.ylabel('Sample Amplitudes')
matplotlib.pyplot.show()
print('\n\n')
print('___Additional Information___')
print('Number of events processed: %d' % self.eventCounter)
print('Number of events plotted: %d' % len(self.maximumAmplitudesArray))
print('Time taken to process data: %f' % (time.time() - self.startTime), "seconds")
print('\n\n')
pulse_gradient_analysis = PulseGradientAnalysis()
pulse_gradient_analysis.process_data()
pulse_gradient_analysis.display_data()
|
gpl-3.0
|
wojciech-galan/viruses_classifier
|
viruses_classifier/__main__.py
|
1
|
2009
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import argparse
import numpy as np
from sklearn.externals import joblib
import constants
from classifier import classify, seq_to_features
from libs import read_sequence
def validate_classifier_name():pass #TODO
def validate_acid_type(): pass #TODO
def main(args=sys.argv[1:]):
    parser = argparse.ArgumentParser(description='')  # TODO: add a description
parser.add_argument('sequence', type=str, help='sequence in plaintext')
parser.add_argument('--nucleic_acid', type=str, help='nucleic acid: either DNA or RNA',
choices=['DNA', 'RNA', 'dna', 'rna'])
parser.add_argument('--classifier', type=str, help='classifier: SVC, kNN, QDA or LR',
choices=['SVC','kNN', 'QDA', 'LR', 'svc', 'knn', 'qda', 'lr'])
parser.add_argument('--probas', '-p', dest='probas', action='store_true')
parsed_args = parser.parse_args(args)
classifier_name = parsed_args.classifier.lower()
nucleic_acid = parsed_args.nucleic_acid.lower()
feature_indices = constants.feature_indices[parsed_args.classifier.lower()]
# if not (parsed_args.classifier.lower() == 'svc' or parsed_args.classifier.lower() == 'knn' or
# parsed_args.classifier.lower() == 'qda'):
# raise ValueError("Classifier should be SVC, kNN or QDA")
# if not (parsed_args.nucleic_acid.lower() == 'dna' or parsed_args.nucleic_acid.lower() == 'rna'):
# raise ValueError("Nucleic acid tye should be either DNA or RNA")
sequence = read_sequence.read_sequence(parsed_args.sequence)
scaler = joblib.load(constants.scaler_path)
classifier = joblib.load(constants.classifier_paths[classifier_name])
seq_features = seq_to_features(sequence, nucleic_acid)
print classify(seq_features, scaler, classifier,
feature_indices, parsed_args.probas)
if __name__ == '__main__':
main(sys.argv[1:])
# TODO: fix the configuration so that it works under Windows
|
gpl-3.0
|
leylabmpi/pyTecanFluent
|
pyTecanFluent/QPCR.py
|
1
|
20332
|
# -*- coding: utf-8 -*-
# import
## batteries
from __future__ import print_function
import os
import sys
import argparse
from itertools import product,cycle
import string
## 3rd party
import numpy as np
import pandas as pd
## package
from pyTecanFluent import Utils
from pyTecanFluent import Fluent
from pyTecanFluent import Labware
# functions
def get_desc():
desc = 'Create robot commands for qPCR assay setup'
return desc
def parse_args(test_args=None, subparsers=None):
# desc
desc = get_desc()
epi = """DESCRIPTION:
Create a worklist file for the TECAN Fluent robot for qPCR setup.
The input is an exported plate layout from the BioRad qPCR software.
The file format should be Excel or CSV.
    Just create a plate layout for your experiment, then export and add some needed info:
The following columns should also be added to the table:
* "Sample labware name"
* labware name containing the sample (any name that you want)
      * Example: "source plate"
* "Sample labware type"
* labware type (must EXACTLY match an existing labware type)
* Example: "96 Eppendorf TwinTec PCR"
* "Sample location"
* location of sample in the source plate.
* numeric; column-wise indexing
* "Sample volume"
* numeric; sample volume in ul
* "MM name"
* Name of master mix for that sample
* This allows for multiple master mixes per assay
* "MM volume"
* Volume of master mix in PCR rxn (ul)
* "Water volume"
* Volume of water in PCR rxn (ul)
Notes:
    * Sample locations in plates are numbered column-wise (left-to-right)
* The setup file (input table) MUST have a header (capitalization doesn't matter)
* All volumes are in ul.
"""
if subparsers:
parser = subparsers.add_parser('qPCR', description=desc, epilog=epi,
formatter_class=argparse.RawTextHelpFormatter)
else:
parser = argparse.ArgumentParser(description=desc, epilog=epi,
formatter_class=argparse.RawTextHelpFormatter)
# args
## I/O
groupIO = parser.add_argument_group('I/O')
groupIO.add_argument('setup', metavar='SetupFile', type=str,
help='An Excel or CSV file with experimental setup')
groupIO.add_argument('--prefix', type=str, default='TECAN_qPCR',
help='Output file name prefix (default: %(default)s)')
groupIO.add_argument('--format', type=str, default=None,
choices=[None, 'excel', 'csv', 'tsv'],
help='File format (excel, csv, or tsv). If not provided, the format is determined from the file extension (default: %(default)s)')
## Source labware
src = parser.add_argument_group('Source labware')
src.add_argument('--mm-type', type=str, default='2ml Eppendorf waste',
help='Mastermix labware type (default: %(default)s)')
src.add_argument('--water-type', type=str, default='100ml_1 waste',
help='Water labware type (default: %(default)s)')
## Destination labware
dest = parser.add_argument_group('Destination labware')
dest.add_argument('--dest', type=str, default='Destination plate',
help='Destination plate labware name (default: %(default)s)')
dest.add_argument('--dest-type', type=str, default='384 Well Biorad PCR',
help='Destination plate labware type (default: %(default)s)')
# Liquid classes
liq = parser.add_argument_group('Liquid classes')
liq.add_argument('--mm-liq', type=str, default='MasterMix Free Multi Wall Disp',
help='Mastermix liquid class (default: %(default)s)')
liq.add_argument('--samp-liq', type=str, default='Water Free Single Wall Disp',
help='Sample liquid class (default: %(default)s)')
liq.add_argument('--water-liq', type=str, default='Water Free Single Wall Disp',
help='Water liquid class (default: %(default)s)')
liq.add_argument('--n-tip-reuse', type=int, default=4,
help='Number of tip reuses for applicable reagents (default: %(default)s)')
# Parse & return
if test_args:
args = parser.parse_args(test_args)
return args
return parser
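# Illustrative sketch (added; not part of the original file): a minimal setup
# table for this script could look like the following semicolon-delimited CSV,
# using the column names required by check_df_setup(); all values are made up:
#
#   Row;Column;Sample type;Sample labware name;Sample labware type;Sample location;Sample volume;MM name;MM volume;Water volume
#   A;1;Unknown;source plate;96 Eppendorf TwinTec PCR;1;2.0;MM1;8.0;0.0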
def main(args=None):
# Input
if args is None:
args = parse_args()
check_args(args)
# Load input assay setup table
df_setup = load_setup(args.setup,
file_format=args.format)
# gwl object init
TipTypes = ['FCA, 1000ul SBS', 'FCA, 200ul SBS',
'FCA, 50ul SBS', 'FCA, 10ul SBS']
gwl = Fluent.gwl(TipTypes)
# adding sample/reagent destinations to setup table
n_wells = gwl.db.get_labware_wells(args.dest_type)
add_dest(df_setup, args.dest, args.dest_type, n_wells)
df_setup = check_rack_labels(df_setup)
# Reordering dest for optimal pipetting
if n_wells == 384:
df_setup = Utils.reorder_384well(df_setup, gwl,
labware_name_col='dest_labware_name',
labware_type_col='dest_labware_type',
position_col='dest_target_position')
elif n_wells == 96:
        df_setup.sort_values(['dest_target_position'], inplace=True)
else:
msg = 'Labware type "{}" not recognized'
        raise ValueError(msg.format(args.dest_type))
# Adding commands to gwl object
pip_mastermixes(df_setup, gwl=gwl,
src_labware_type=args.mm_type,
liq_cls=args.mm_liq,
n_tip_reuse=args.n_tip_reuse)
## Samples
pip_samples(df_setup, gwl=gwl,
liq_cls=args.samp_liq)
## Water
pip_water(df_setup, gwl=gwl,
src_labware_type=args.water_type,
liq_cls=args.water_liq)
## writing out worklist (gwl) file
gwl_file = args.prefix + '.gwl'
gwl.write(gwl_file)
# making labware table
lw = Labware.labware()
lw.add_gwl(gwl)
lw_df = lw.table()
lw_file = args.prefix + '_labware.txt'
lw_df.to_csv(lw_file, sep='\t', index=False)
# Creating report file
report_file = args.prefix + '_report.txt'
with open(report_file, 'w') as repFH:
MM_names = np.unique(df_setup['mm name'])
for i,MM_name in enumerate(MM_names):
df = df_setup.loc[df_setup['mm name'] == MM_name]
df.reset_index(inplace=True)
write_report(df, MM_name=MM_name, outFH=repFH)
# status on files written
Utils.file_written(gwl_file)
Utils.file_written(lw_file)
Utils.file_written(report_file)
def check_args(args):
"""Checking user input
"""
# special characters for namings
args.dest = Utils.rm_special_chars(args.dest)
def load_setup(input_file, file_format=None, header=0):
"""Loading setup file (Excel, csv, or tab-delim)
"""
# format
if file_format is None:
if input_file.endswith('.csv'):
file_format = 'csv'
elif input_file.endswith('.txt') or input_file.endswith('.tsv'):
file_format = 'tab'
elif input_file.endswith('.xls') or input_file.endswith('.xlsx'):
file_format = 'excel'
else:
file_format = file_format.lower()
# load via pandas IO
if file_format == 'csv':
df = pd.read_csv(input_file, sep=';', header=header)
elif file_format == 'tab':
df = pd.read_csv(input_file, sep='\t', header=header)
elif file_format == 'excel':
xls = pd.ExcelFile(input_file)
df = pd.read_excel(xls, header=header)
else:
raise ValueError('Setup file not in usable format')
# caps-invariant column IDs
df.columns = [x.lower() for x in df.columns]
# checking format of table
check_df_setup(df)
# filtering NaN for required columns
df.dropna(subset=['sample type', 'sample labware name', 'sample location'],
inplace=True)
# making sure labware names are "TECAN worklist friendly"
df = Utils.rm_special_chars(df, 'sample labware name')
# assert & return
assert df.shape[1] > 1, 'Input file is only 1 column; wrong delimiter used?'
return df
def check_df_setup(df_setup):
"""Assertions of df_conc object formatting
"""
# checking for column IDs
col_IDs = ('row', 'column', 'sample type',
'sample labware name', 'sample labware type',
'sample location', 'sample volume',
'mm name', 'mm volume', 'water volume')
    msg = 'Column "{}" not found (capitalization invariant)'
for x in col_IDs:
if not x in df_setup.columns:
raise ValueError(msg.format(x))
# checking sample locations (>=1)
msg = 'ERROR (SetupFile, line={}): location is < 1'
for i,loc in enumerate(df_setup['sample location']):
if loc < 1:
print(msg.format(i), file=sys.stderr)
# checking sample conc
msg = 'ERROR (setupFile, line={}): volume is < 0'
for i,vol in enumerate(df_setup['sample volume']):
assert np.isnan(vol) or vol >= 0.0, msg.format(i)
for i,vol in enumerate(df_setup['mm volume']):
assert np.isnan(vol) or vol >= 0.0, msg.format(i)
for i,vol in enumerate(df_setup['water volume']):
assert np.isnan(vol) or vol >= 0.0, msg.format(i)
# removing "tube" from end of labware type (if present)
Utils.rm_tube(df_setup, 'sample labware type')
def check_rack_labels(df_setup):
"""Removing '.' for rack labels (causes execution failures)
"""
cols = ['sample labware name', 'mm name', 'dest_labware_name']
for x in cols:
df_setup[x] = [str(y).replace('.', '_') for y in df_setup[x].tolist()]
return df_setup
def plate2robot_loc(row_val, col_val, n_wells):
"""Changing positioning from row (letter) and column (number)
to just numeric position (column-wise) on the plate,
which is needed for TECAN robot positioning.
Using index for identifying well number in plate
[args]
row_val: string
    col_val: string
    n_wells: int; number of wells on the plate (used for location indexing)
"""
# index for converting row to numeric
idx = string.ascii_uppercase
idx = {x:i+1 for i,x in enumerate(idx)}
row_val = idx[row_val]
# getting location on plate
msg = 'Destination location "{}" is out of range'
if n_wells == 96:
loc = (col_val - 1) * 8 + row_val
assert loc > 0 and loc <= 96, msg.format(loc)
elif n_wells == 384:
loc = (col_val - 1) * 16 + row_val
assert loc > 0 and loc <= 384, msg.format(loc)
else:
msg = 'Number of wells is not valid: "{}"'
        raise ValueError(msg.format(n_wells))
return loc
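# Worked example (added for illustration): plate2robot_loc('B', 3, n_wells=96)
# maps row 'B' to 2, so loc = (3 - 1) * 8 + 2 = 18, i.e. well B3 is the 18th
# position counting column-wise on a 96-well plate.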
def add_dest(df_setup, dest_labware_name, dest_labware_type, n_wells=96):
"""Setting destination locations for samples & reagents
Adding to df_conc:
[dest_labware, dest_location]
"""
# setting destination labware
df_setup['dest_labware_name'] = dest_labware_name
df_setup['dest_labware_type'] = dest_labware_type
# setting destination location based on plate layout
func = lambda x: plate2robot_loc(x['row'], x['column'], n_wells=n_wells)
df_setup['dest_target_position'] = df_setup.apply(func, 1)
def reorder_384well(df, reorder_col):
"""Reorder values so that the odd, then the even locations are
transferred. This is faster for a 384-well plate
df: pandas.DataFrame
reorder_col: column name to reorder
"""
df['TECAN_sort_IS_EVEN'] = [x % 2 == 0 for x in df[reorder_col]]
df.sort_values(by=['TECAN_sort_IS_EVEN', reorder_col], inplace=True)
df = df.drop('TECAN_sort_IS_EVEN', 1)
df.index = range(df.shape[0])
return df
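# Worked example (added for illustration): for destination positions
# [1, 2, 3, 4] the rows come back ordered [1, 3, 2, 4] -- all odd positions
# first, then the even ones -- which, as noted in the docstring, is faster to
# pipette on a 384-well plate.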
def pip_mastermixes(df_setup, gwl, src_labware_type,
liq_cls='Mastermix Free Single',
n_tip_reuse=1):
"""Writing worklist commands for aliquoting mastermix.
Re-using tips
"""
# split by mastermix names (different master mixes)
MM_names = np.unique(df_setup['mm name'])
gwl.add(Fluent.Comment('Mastermixes'))
for i,MM_name in enumerate(MM_names):
# partitioning to just focal mastermix
df = df_setup.loc[df_setup['mm name'] == MM_name]
df.reset_index(inplace=True)
# all same volumes for mastermix?
pip_mastermix(df, gwl=gwl,
MM_name=MM_name,
src_labware_type=src_labware_type,
liq_cls=liq_cls,
n_tip_reuse=n_tip_reuse)
def pip_mastermix(df_map, gwl, MM_name, src_labware_type,
liq_cls='Mastermix Free Single',
n_tip_reuse=1):
"""Dispense of particular mastermix
"""
# df copy
df = df_map.copy()
## ordering df for proper tip reuse
x = cycle(range(8))
df['CHANNEL_ORDER'] = [next(x) for y in range(df.shape[0])]
x = cycle(range(n_tip_reuse))
df['TIP_BATCH'] = Utils.tip_batch(df['CHANNEL_ORDER'], n_tip_reuse)
df.sort_values(by=['TIP_BATCH',
'CHANNEL_ORDER',
'dest_target_position'], inplace=True)
df.reset_index(inplace=True)
# iterating mastermix records in setup table (single mastermix)
gwl.add(Fluent.Comment('Mastermix: {}'.format(MM_name)))
for i in range(df.shape[0]):
# aspiration
asp = Fluent.Aspirate()
asp.RackLabel = '{0} MM[{1:0>3}]'.format(MM_name, 1)
asp.RackType = src_labware_type
asp.Position = 1
asp.Volume = df.loc[i,'mm volume']
asp.LiquidClass = liq_cls
gwl.add(asp)
# dispensing
disp = Fluent.Dispense()
disp.RackLabel = df.loc[i,'dest_labware_name']
disp.RackType = df.loc[i,'dest_labware_type']
disp.Position = df.loc[i,'dest_target_position']
disp.Volume = df.loc[i,'mm volume']
        disp.LiquidClass = liq_cls
gwl.add(disp)
# waste
if (i + 1) % n_tip_reuse == 0 or i + 1 == df.shape[0]:
gwl.add(Fluent.Waste())
# finish section
gwl.add(Fluent.Break())
def pip_mastermix_multi_disp(df, gwl, MM_name, src_labware_type, multi_disp=6,
liq_cls='Mastermix Free Multi'):
"""Writing worklist commands for aliquoting mastermix.
Re-using tips
"""
# assertions
cols = ['dest_labware_name', 'dest_labware_type']
assert df.drop_duplicates(cols).shape[0] == 1
# getting wells to exclude
lw_type = df.loc[0,'dest_labware_type']
n_wells = gwl.db.get_labware_wells(lw_type)
all_wells = [x+1 for x in range(n_wells)]
target_pos = df['dest_target_position'].tolist()
to_exclude = set(all_wells) - set(target_pos)
    # creating reagent distribution command
rd = Fluent.Reagent_distribution()
rd.SrcRackLabel = '{0} MM[{1:0>3}]'.format(MM_name, 1)
rd.SrcRackType = '1.5ml Eppendorf waste'
rd.SrcPosStart = 1
rd.SrcPosEnd = 1
# dispense parameters
rd.DestRackLabel = df.loc[0,'dest_labware_name']
rd.DestRackType = df.loc[0,'dest_labware_type']
rd.DestPosStart = 1
rd.DestPosEnd = n_wells
# other
rd.Volume = df.loc[0,'mm volume']
rd.LiquidClass = liq_cls
rd.NoOfDiTiReuses = 2
rd.NoOfMultiDisp = multi_disp
rd.Direction = 0
rd.ExcludedDestWell = ';'.join([str(x) for x in list(to_exclude)])
# adding to gwl object
gwl.add(rd)
# adding break
gwl.add(Fluent.Break())
def pip_samples(df_setup, gwl, liq_cls='Water Contact Wet Single'):
"""Commands for aliquoting samples into distination plate
"""
gwl.add(Fluent.Comment('Samples'))
# filtering 'nan' from table
x = pd.notnull(df_setup['sample labware name'])
y = pd.notnull(df_setup['sample labware type'])
z = pd.notnull(df_setup['sample location'])
df = df_setup.loc[x & y & z]
df.reset_index(inplace=True)
if df.shape[0] < df_setup.shape[0]:
msg = 'WARNING: some samples skipped due to missing values!'
print(msg, file=sys.stderr)
# for each Sample, create asp/dispense commands
for i in range(df.shape[0]):
# aspiration
asp = Fluent.Aspirate()
asp.RackLabel = df.loc[i,'sample labware name']
asp.RackType = df.loc[i,'sample labware type']
asp.Position = df.loc[i, 'sample location']
asp.Volume = df.loc[i,'sample volume']
asp.LiquidClass = liq_cls
gwl.add(asp)
# dispensing
disp = Fluent.Dispense()
disp.RackLabel = df.loc[i,'dest_labware_name']
disp.RackType = df.loc[i,'dest_labware_type']
disp.Position = df.loc[i,'dest_target_position']
disp.Volume = df.loc[i,'sample volume']
        disp.LiquidClass = liq_cls
gwl.add(disp)
# waste (no tip re-use)
gwl.add(Fluent.Waste())
gwl.add(Fluent.Break())
def pip_water(df_setup, gwl, src_labware_type,
liq_cls='Water Contact Wet Single'):
"""Writing worklist commands for aliquoting water
Using single asp-disp.
"""
gwl.add(Fluent.Comment('Water'))
# filtering 'nan' from table
x = pd.notnull(df_setup['water volume'])
df = df_setup.loc[x]
df.index = range(df.shape[0])
if df.shape[0] < df_setup.shape[0]:
msg = 'WARNING: water asp/disp for some samples skipped due to missing "water volume" values!'
print(msg, file=sys.stderr)
# for each Sample, create asp/dispense commands
for i in range(df.shape[0]):
if df.loc[i,'water volume'] <= 0:
            msg = 'WARNING: skipping water asp/disp for sample (volume <= 0)'
print(msg, file=sys.stderr)
continue
# aspiration
asp = Fluent.Aspirate()
asp.RackLabel = 'Water source[{0:0>3}]'.format(1)
asp.RackType = src_labware_type
asp.Position = 1
asp.Volume = df.loc[i,'water volume']
asp.LiquidClass = liq_cls
gwl.add(asp)
# dispensing
disp = Fluent.Dispense()
disp.RackLabel = df.loc[i,'dest_labware_name']
disp.RackType = df.loc[i,'dest_labware_type']
disp.Position = df.loc[i,'dest_target_position']
disp.Volume = df.loc[i,'water volume']
        disp.LiquidClass = liq_cls
gwl.add(disp)
# waste (no tip re-use)
gwl.add(Fluent.Waste())
gwl.add(Fluent.Break())
def add_error(x, error_perc):
if x is None:
return None
return x * (1.0 + error_perc / 100.0)
def write_report_line(outFH, subject, volume, round_digits=1, error_perc=None):
if volume is None:
v = 'NA'
else:
if error_perc is not None:
volume = add_error(volume, error_perc)
v = round(volume, round_digits)
outFH.write('{}:\t{}\n'.format(subject, v))
def write_report(df_setup, MM_name, outFH):
"""Writing a report on the qPCR setup
"""
# calculating total volumes
n_rxn = df_setup.shape[0]
## total mastermix
total_mm_volume = np.sum(df_setup['mm volume'])
## total water
total_water_volume = np.sum(df_setup['water volume'])
# report
# number of samples
outFH.write('# PCR REPORT\n')
outFH.write('MasterMix: {}\n'.format(MM_name))
outFH.write('Number of total rxns:\t{}\n'.format(n_rxn))
## raw total volumes
outFH.write('# Total reagent volumes (ul)\n')
write_report_line(outFH, 'MasterMix', total_mm_volume)
write_report_line(outFH, 'Water', total_water_volume)
## with pipetting error
outFH.write('# Total reagent volumes + 10% extra (ul)\n')
write_report_line(outFH, 'MasterMix', total_mm_volume, error_perc=10)
write_report_line(outFH, 'Water', total_water_volume, error_perc=10)
## end
outFH.write('\n')
# main
if __name__ == '__main__':
pass
|
mit
|
rafaeltg/Deep-Learning-Algorithms
|
pydl/model_selection/methods.py
|
2
|
2906
|
from math import floor, ceil
from sklearn.model_selection import LeaveOneOut, KFold, StratifiedKFold, ShuffleSplit, StratifiedShuffleSplit
class TrainTestSplit:
def __init__(self, test_size=0.3):
self.test_size = test_size
def split(self, X, y=None):
assert len(X) > 0, 'X cannot be empty!'
n = len(X)
train_size = floor(n * (1 - self.test_size))
yield list(range(0, train_size, 1)), list(range(train_size, n, 1))
class TimeSeriesSplit:
def __init__(self,
window=None,
horizon=None,
n_folds=None,
window_size=0.8,
horizon_size=0.2,
fixed=True,
by=1):
self.window = window
self.horizon = horizon
self.n_folds = n_folds
self.window_size = window_size
self.horizon_size = horizon_size
self.fixed = fixed
self.by = by
def split(self, X, y=None):
assert (X is not None) and (len(X) > 0), 'X cannot be empty!'
if self.n_folds is not None:
assert isinstance(self.window_size, float) and (self.window_size > 0), "'window_size' must be greater than zero"
assert isinstance(self.horizon_size, float) and (self.horizon_size > 0), "'horizon_size' must be greater than zero"
self._calc_params(len(X))
if (self.window is not None) and (self.horizon is not None):
assert len(X) >= (self.window+self.horizon), 'window size plus horizon size cannot be greater than input size!'
starts_test = list(range(self.window, len(X)-self.horizon+1, self.by))
if self.fixed:
trains = [range(test_start-self.window, test_start) for test_start in starts_test]
else:
trains = [range(0, test_start) for test_start in starts_test]
tests = [range(test_start, test_start+self.horizon) for test_start in starts_test]
for i in range(0, len(trains)):
yield list(trains[i]), list(tests[i])
def _calc_params(self, x_size):
aux = self.window_size + (self.n_folds * self.horizon_size)
self.horizon = ceil(x_size * (self.horizon_size / aux))
self.window = x_size - (self.n_folds * self.horizon)
self.by = self.horizon
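# Worked example (added for illustration):
#   TimeSeriesSplit(window=3, horizon=1, by=1, fixed=True).split(range(5))
# yields two folds, train [0, 1, 2] / test [3] and train [1, 2, 3] / test [4],
# since starts_test = list(range(3, 5, 1)) = [3, 4].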
def get_cv_method(method, **kwargs):
if method == 'kfold':
return KFold(**kwargs)
elif method == 'skfold':
return StratifiedKFold(**kwargs)
elif method == 'loo':
return LeaveOneOut()
elif method == 'shuffle_split':
return ShuffleSplit(**kwargs)
elif method == 'split':
return TrainTestSplit(**kwargs)
elif method == 's_shuffle_split':
return StratifiedShuffleSplit(**kwargs)
elif method == 'time_series':
return TimeSeriesSplit(**kwargs)
else:
raise AttributeError('Invalid CV method - %s!' % method)
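# Usage sketch (added for illustration): get_cv_method('kfold', n_splits=5,
# shuffle=True) returns a scikit-learn KFold splitter, while
# get_cv_method('time_series', window=3, horizon=1) returns the TimeSeriesSplit
# defined above.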
|
mit
|
xavierwu/scikit-learn
|
sklearn/metrics/classification.py
|
95
|
67713
|
"""Metrics to assess performance on classification task given classe prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # We can't have more than one value in y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
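# Illustrative sketch (hypothetical helper, not part of scikit-learn): shows
# the shapes returned by _check_targets for plain binary lists and for
# multilabel indicator matrices.
def _check_targets_example():
    y_type, y_t, y_p = _check_targets([0, 1, 1, 0], [0, 1, 0, 0])
    assert y_type == 'binary'
    assert isinstance(y_t, np.ndarray) and isinstance(y_p, np.ndarray)
    # Indicator matrices come back as CSR sparse label indicators.
    y_type, y_t, y_p = _check_targets(np.array([[0, 1], [1, 1]]),
                                      np.array([[0, 1], [1, 0]]))
    assert y_type == 'multilabel-indicator'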
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
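# Illustrative sketch (hypothetical helper, not part of scikit-learn):
# _weighted_sum averages per-sample scores when normalize=True, otherwise it
# sums them, weighting each sample if sample_weight is given.
def _weighted_sum_example():
    scores = np.array([1.0, 0.0, 1.0, 1.0])
    assert _weighted_sum(scores, None, normalize=True) == 0.75
    assert _weighted_sum(scores, None, normalize=False) == 3.0
    assert _weighted_sum(scores, np.array([2.0, 1.0, 0.0, 0.0])) == 2.0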
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
           computational linguistics". Computational Linguistics 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
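# Illustrative worked example (hypothetical helper, not part of scikit-learn):
# for y1 = [0, 0, 1, 1] and y2 = [0, 0, 1, 0] the confusion matrix is
# [[2, 0], [1, 1]], so p_o = 0.75, p_e = 0.5 and kappa = (0.75 - 0.5) / 0.5.
def _cohen_kappa_example():
    kappa = cohen_kappa_score([0, 0, 1, 1], [0, 0, 1, 0])
    assert np.isclose(kappa, 0.5)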
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
    sets, is used to compare the set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
            # If there are no labels at all, the ratio is NaN instead; we set
            # the Jaccard score to 1: lim_{x->0} x/x = 1
            # Note: with py2.6 and np 1.3 we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
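# Illustrative sketch (hypothetical helper, not part of scikit-learn): when a
# label has no predicted samples its precision denominator is zero; the entry
# is forced to 0.0 and an UndefinedMetricWarning is emitted. Callers silence
# the floating-point warnings with np.errstate, as done below.
def _prf_divide_example():
    with np.errstate(divide='ignore', invalid='ignore'):
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            result = _prf_divide(np.array([1.0, 0.0]), np.array([2.0, 0.0]),
                                 'precision', 'predicted', 'macro',
                                 ('precision',))
    assert np.allclose(result, [0.5, 0.0])
    assert any(issubclass(w.category, UndefinedMetricWarning) for w in caught)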
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    support : int (if average is not None) or array of int, shape =\
        [n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
       <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
        # Don't need to warn for F: either P or R warned, or tp == 0 where pred
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred``, which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. The Hamming loss is more forgiving in that it penalizes only the
    individual labels.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
    In the binary case, assuming labels in y_true are encoded with +1 and -1,
    when a prediction mistake is made, ``margin = y_true * pred_decision`` is
    always negative (since the signs disagree), implying ``1 - margin`` is
    always greater than 1. The cumulative hinge loss is therefore an upper
    bound of the number of mistakes made by the classifier.
    In the multiclass case, the function expects that either all the labels are
    included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
    to Crammer-Singer's method. As in the binary case, the cumulative hinge loss
    is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
        True target, consisting of two distinct integer values. The positive
        label must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
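        # Crammer-Singer multiclass margin: for each sample, take the decision
        # value of the true class minus the largest decision value among the
        # remaining classes ('mask' hides the true-class column).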
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
    # The hinge loss does not penalize predictions whose margin is at least 1.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
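# Illustrative sketch (hypothetical helper, not part of scikit-learn): the
# check accepts two-class targets with probabilities in [0, 1] and returns
# the targets binarized against the greater of the two labels.
def _check_binary_probabilistic_predictions_example():
    out = _check_binary_probabilistic_predictions(np.array([0, 1, 1, 0]),
                                                  np.array([.1, .9, .8, .3]))
    assert np.array_equal(out, [0, 1, 1, 0])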
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
    label is controlled via the parameter ``pos_label``; if it is not given,
    the maximum label is used as the positive class.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
|
bsd-3-clause
|
marcsans/cnn-physics-perception
|
phy/lib/python2.7/site-packages/scipy/stats/_stats_mstats_common.py
|
12
|
8157
|
from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
def linregress(x, y=None):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero.
stderr : float
Standard error of the estimated gradient.
See also
--------
optimize.curve_fit : Use non-linear least squares to fit a function to data.
optimize.leastsq : Minimize the sum of squares of a set of equations.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
# To get coefficient of determination (r_squared)
>>> print("r-squared:", r_value**2)
('r-squared:', 0.080402268539028335)
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
return LinregressResult(slope, intercept, r, prob, sterrest)
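# Illustrative sketch (an editor's addition, not part of the original module):
# the slope returned above equals cov(x, y) / var(x) and the intercept equals
# mean(y) - slope * mean(x), mirroring the computation in the function body.
# Uses np and linregress, both available at this point in the module;
# underscore-prefixed names are demo-only.
_rng_lr = np.random.RandomState(0)
_x_lr = _rng_lr.random_sample(50)
_y_lr = 2.0 * _x_lr + 1.0 + 0.1 * _rng_lr.standard_normal(50)
_res_lr = linregress(_x_lr, _y_lr)
_ssxm, _ssxym, _, _ssym = np.cov(_x_lr, _y_lr, bias=1).flat
assert np.isclose(_res_lr.slope, _ssxym / _ssxm)
assert np.isclose(_res_lr.intercept, _y_lr.mean() - _res_lr.slope * _x_lr.mean())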
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
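# Illustrative sketch (an editor's addition, not part of the original module):
# the Theil slope is the median of all pairwise slopes, so a single gross
# outlier barely moves it. Underscore-prefixed names are demo-only.
_x_ts = np.array([0., 1., 2., 3., 4.])
_y_ts = np.array([0., 1., 2., 30., 4.])          # one gross outlier
_pairwise_ts = [(_y_ts[j] - _y_ts[i]) / (_x_ts[j] - _x_ts[i])
                for i in range(len(_x_ts)) for j in range(i + 1, len(_x_ts))]
_medslope_ts, _medinter_ts, _lo_ts, _up_ts = theilslopes(_y_ts, _x_ts)
assert np.isclose(_medslope_ts, np.median(_pairwise_ts))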
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
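# Illustrative sketch (an editor's addition, not part of the original module):
# for non-empty input, _find_repeats matches np.unique with
# return_counts=True restricted to values that occur more than once.
# Underscore-prefixed names are demo-only.
_arr_fr = np.array([1., 2., 2., 3., 3., 3.])
_vals_fr, _counts_fr = _find_repeats(_arr_fr.copy())  # copy: input may be clobbered
_u_fr, _c_fr = np.unique(_arr_fr, return_counts=True)
assert np.array_equal(_vals_fr, _u_fr[_c_fr > 1])
assert np.array_equal(_counts_fr, _c_fr[_c_fr > 1])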
|
mit
|
vibhorag/scikit-learn
|
sklearn/covariance/robust_covariance.py
|
198
|
29735
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
det : float
Log-determinant of the robust covariance estimate (as computed with
fast_logdet).
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
dist : array-like, shape (n_samples,)
Mahalanobis distances of the observations, computed with the robust
location and covariance estimates.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution).")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
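# Illustrative sketch (an editor's addition, not part of the original module):
# a single c_step call on toy data. As implemented above it returns
# (location, covariance, det, support, dist), where det is the
# log-determinant of the robust covariance and support flags the n_support
# retained observations. Underscore-prefixed names are demo-only.
_rng_cs = np.random.RandomState(7)
_X_cs = _rng_cs.randn(60, 3)
_loc_cs, _cov_cs, _det_cs, _supp_cs, _dist_cs = c_step(
    _X_cs, n_support=40, random_state=7)
assert _supp_cs.sum() == 40 and _cov_cs.shape == (3, 3)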
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subsets of observations from which to compute the MCD.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
--------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
"integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
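# Illustrative sketch (an editor's addition, not part of the original module):
# select_candidates run from 5 random initial supports, keeping the 2 best
# (lowest-determinant) location/covariance candidates. Underscore-prefixed
# names are demo-only.
_rng_sc = np.random.RandomState(3)
_X_sc = _rng_sc.randn(80, 2)
_locs_sc, _covs_sc, _supps_sc, _ds_sc = select_candidates(
    _X_sc, n_support=50, n_trials=5, select=2, random_state=3)
assert _locs_sc.shape == (2, 2) and _supps_sc.shape == (2, 80)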
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_samples + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets of the
data, then to pool them into larger subsets, and finally into the full
data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the shortest halves of the sample
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
# The above is too big. Let's try with something much smaller
# (and less optimal): shrink n_best_tot before allocating so the
# fallback array actually takes less memory.
n_best_tot = 10
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
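# Illustrative usage sketch (an editor's addition, not part of the original
# module): fast_mcd on Gaussian data with a few injected outliers; the raw
# robust location stays near the true centre, and `support` marks exactly
# the default n_support observations used for the raw estimates.
# Underscore-prefixed names are demo-only.
_rng_fm = np.random.RandomState(42)
_X_fm = np.vstack([_rng_fm.randn(95, 2), 8.0 + _rng_fm.randn(5, 2)])
_loc_fm, _cov_fm, _supp_fm, _dist_fm = fast_mcd(_X_fm, random_state=42)
assert _supp_fm.sum() == int(np.ceil(0.5 * (100 + 2 + 1)))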
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful when working with data whose mean is almost, but not exactly,
zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_samples + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
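# Illustrative usage sketch (an editor's addition, not part of the original
# module): MinCovDet's reweighted location on contaminated Gaussian data
# stays close to the true centre, whereas the plain sample mean is pulled
# towards the outliers. Underscore-prefixed names are demo-only.
_rng_mcd = np.random.RandomState(0)
_X_mcd = _rng_mcd.randn(100, 2)
_X_mcd[:5] += 10.0                      # inject a few outliers
_mcd_demo = MinCovDet(random_state=0).fit(_X_mcd)
# _mcd_demo.location_ stays near [0, 0]; _X_mcd.mean(axis=0) is roughly [0.5, 0.5]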
|
bsd-3-clause
|
arahuja/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
13
|
43295
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
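# Illustrative sketch (an editor's addition, not part of the original test
# module): the running-average update used in CommonTest.asgd above,
#     avg <- (avg * i + w) / (i + 1),
# is just the arithmetic mean of the weight vectors seen so far.
# Underscore-prefixed names are demo-only.
_ws_demo = [np.array([1.0, 0.0]), np.array([0.0, 1.0]), np.array([1.0, 1.0])]
_avg_demo = np.zeros(2)
for _i_demo, _w_demo in enumerate(_ws_demo):
    _avg_demo = (_avg_demo * _i_demo + _w_demo) / (_i_demo + 1.0)
assert np.allclose(_avg_demo, np.mean(_ws_demo, axis=0))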
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_auto(self):
# partial_fit with class_weight='auto' not supported
assert_raises_regexp(ValueError,
"class_weight 'auto' is not supported for "
"partial_fit. In order to use 'auto' weights, "
"use compute_class_weight\('auto', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='auto').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss=loss, alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_auto_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001, n_iter=1000,
class_weight="auto", shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
# build a very, very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000, class_weight="auto", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="auto", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
# ground truth linear model that generates y from X and to which the
# models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
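# Up to the stochastic vs. coordinate-wise optimization, both estimators above
# minimize the same elastic net objective,
#     1 / (2 * n_samples) * ||y - X w||^2
#         + alpha * (l1_ratio * ||w||_1 + 0.5 * (1 - l1_ratio) * ||w||_2^2),
# which is why their coefficients are expected to agree to about 2 decimals.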
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1 == id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
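# With alpha as large as 1e5 the penalty term dwarfs the data-fit term, so the
# regularized optimum is numerically the zero vector for every penalty type,
# which is exactly what the assertion above verifies.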
|
bsd-3-clause
|
cmoutard/mne-python
|
mne/viz/circle.py
|
4
|
15461
|
"""Functions to plot on circle as for connectivity
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
#
# License: Simplified BSD
from itertools import cycle
from functools import partial
import numpy as np
from .utils import plt_show
from ..externals.six import string_types
from ..fixes import tril_indices, normalize_colors
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
group_boundaries=None, group_sep=10):
"""Create layout arranging nodes on a circle.
Parameters
----------
node_names : list of str
Node names.
node_order : list of str
List with node names defining the order in which the nodes are
arranged. Must contain the same elements as node_names, but the order can be
different. The nodes are arranged clockwise starting at "start_pos"
degrees.
start_pos : float
Angle in degrees that defines where the first node is plotted.
start_between : bool
If True, the layout starts with the position between the nodes. This is
the same as adding "180. / len(node_names)" to start_pos.
group_boundaries : None | array-like
List of boundaries between groups, at which a "group_sep" will
be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
group_sep : float
Group separation angle in degrees. See "group_boundaries".
Returns
-------
node_angles : array, shape=(len(node_names),)
Node angles in degrees.
"""
n_nodes = len(node_names)
if len(node_order) != n_nodes:
raise ValueError('node_order has to be the same length as node_names')
if group_boundaries is not None:
boundaries = np.array(group_boundaries, dtype=np.int)
if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
raise ValueError('"group_boundaries" has to be between 0 and '
'n_nodes - 1.')
if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
raise ValueError('"group_boundaries" must have non-decreasing '
'values.')
n_group_sep = len(group_boundaries)
else:
n_group_sep = 0
boundaries = None
# convert it to a list with indices
node_order = [node_order.index(name) for name in node_names]
node_order = np.array(node_order)
if len(np.unique(node_order)) != n_nodes:
raise ValueError('node_order has repeated entries')
node_sep = (360. - n_group_sep * group_sep) / n_nodes
if start_between:
start_pos += node_sep / 2
if boundaries is not None and boundaries[0] == 0:
# special case when a group separator is at the start
start_pos += group_sep / 2
boundaries = boundaries[1:] if n_group_sep > 1 else None
node_angles = np.ones(n_nodes, dtype=np.float) * node_sep
node_angles[0] = start_pos
if boundaries is not None:
node_angles[boundaries] += group_sep
node_angles = np.cumsum(node_angles)[node_order]
return node_angles
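# A minimal usage sketch (hypothetical node names, shown as a comment only):
#
#     >>> names = ['A', 'B', 'C', 'D', 'E', 'F']
#     >>> angles = circular_layout(names, node_order=names,
#     ...                          group_boundaries=[0, 3], group_sep=10)
#
# With two group separators of 10 degrees each, every node then occupies
# (360 - 2 * 10) / 6 degrees of the circle.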
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
n_nodes=0, node_angles=None,
ylim=[9, 10]):
"""Isolates connections around a single node when user left clicks a node.
On right click, resets all connections."""
if event.inaxes != axes:
return
if event.button == 1: # left click
# click must be near node radius
if not ylim[0] <= event.ydata <= ylim[1]:
return
# all angles in range [0, 2*pi]
node_angles = node_angles % (np.pi * 2)
node = np.argmin(np.abs(event.xdata - node_angles))
patches = event.inaxes.patches
for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
patches[ii].set_visible(node in [x, y])
fig.canvas.draw()
elif event.button == 3: # right click
patches = event.inaxes.patches
for ii in range(np.size(indices, axis=1)):
patches[ii].set_visible(True)
fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
node_angles=None, node_width=None,
node_colors=None, facecolor='black',
textcolor='white', node_edgecolor='black',
linewidth=1.5, colormap='hot', vmin=None,
vmax=None, colorbar=True, title=None,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
fontsize_title=12, fontsize_names=8,
fontsize_colorbar=8, padding=6.,
fig=None, subplot=111, interactive=True,
node_linewidth=2., show=True):
"""Visualize connectivity as a circular graph.
Note: This code is based on the circle graph example by Nicolas P. Rougier
http://www.labri.fr/perso/nrougier/coding/.
Parameters
----------
con : array
Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
array is provided, "indices" has to be used to define the connection
indices.
node_names : list of str
Node names. The order corresponds to the order in con.
indices : tuple of arrays | None
Two arrays with indices of connections for which the connection
strengths are defined in con. Only needed if con is a 1D array.
n_lines : int | None
If not None, only the n_lines strongest connections (strength=abs(con))
are drawn.
node_angles : array, shape=(len(node_names),) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
node_width : float | None
Width of each node in degrees. If None, the minimum angle between any
two nodes is used as the width.
node_colors : list of tuples | list of str
List with the color to use for each node. If fewer colors than nodes
are provided, the colors will be repeated. Any color supported by
matplotlib can be used, e.g., RGBA tuples, named colors.
facecolor : str
Color to use for background. See matplotlib.colors.
textcolor : str
Color to use for text. See matplotlib.colors.
node_edgecolor : str
Color to use for lines around nodes. See matplotlib.colors.
linewidth : float
Line width to use for connections.
colormap : str
Colormap to use for coloring the connections.
vmin : float | None
Minimum value for colormap. If None, it is determined automatically.
vmax : float | None
Maximum value for colormap. If None, it is determined automatically.
colorbar : bool
Display a colorbar or not.
title : str
The figure title.
colorbar_size : float
Size of the colorbar.
colorbar_pos : 2-tuple
Position of the colorbar.
fontsize_title : int
Font size to use for title.
fontsize_names : int
Font size to use for node names.
fontsize_colorbar : int
Font size to use for colorbar.
padding : float
Space to add around figure to accommodate long labels.
fig : None | instance of matplotlib.pyplot.Figure
The figure to use. If None, a new figure with the specified background
color will be created.
subplot : int | 3-tuple
Location of the subplot when creating figures with multiple plots. E.g.
121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
matplotlib.pyplot.subplot.
interactive : bool
When enabled, left-click on a node to show only connections to that
node. Right-click shows all connections.
node_linewidth : float
Line width for nodes.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure handle.
axes : instance of matplotlib.axes.PolarAxesSubplot
The subplot handle.
"""
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
n_nodes = len(node_names)
if node_angles is not None:
if len(node_angles) != n_nodes:
raise ValueError('node_angles has to be the same length '
'as node_names')
# convert it to radians
node_angles = node_angles * np.pi / 180
else:
# uniform layout on unit circle
node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
if node_colors is not None:
if len(node_colors) < n_nodes:
node_colors = cycle(node_colors)
else:
# assign colors using colormap
node_colors = [plt.cm.spectral(i / float(n_nodes))
for i in range(n_nodes)]
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
indices = tril_indices(n_nodes, -1)
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# get the colormap
if isinstance(colormap, string_types):
colormap = plt.get_cmap(colormap)
# Make figure background the same colors as axes
if fig is None:
fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
# Use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
axes = plt.subplot(*subplot, polar=True, axisbg=facecolor)
# No ticks, we'll put our own
plt.xticks([])
plt.yticks([])
# Set y axes limit, add additional space if requested
plt.ylim(0, 10 + padding)
# Remove the black axes border which may obscure the labels
axes.spines['polar'].set_visible(False)
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
else:
con_thresh = 0.
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_abs = np.abs(con)
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
con_abs = con_abs[sort_idx]
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# Get vmin vmax for color scaling
if vmin is None:
vmin = np.min(con[np.abs(con) >= con_thresh])
if vmax is None:
vmax = np.max(con)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
nodes_n_con = np.zeros((n_nodes), dtype=np.int)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
# initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(seed=0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (start, end) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# Finally, we draw the connections
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[i], 10
# End point
t1, r1 = node_angles[j], 10
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
# Actual line
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
axes.add_patch(patch)
# Draw ring with colored nodes
height = np.ones(n_nodes) * 1.0
bars = axes.bar(node_angles, height, width=node_width, bottom=9,
edgecolor=node_edgecolor, lw=node_linewidth,
facecolor='.9', align='center')
for bar, color in zip(bars, node_colors):
bar.set_facecolor(color)
# Draw node labels
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
axes.text(angle_rad, 10.4, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=axes)
if colorbar:
norm = normalize_colors(vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm)
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
shrink=colorbar_size,
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
# Add callback for interaction
if interactive:
callback = partial(_plot_connectivity_circle_onpick, fig=fig,
axes=axes, indices=indices, n_nodes=n_nodes,
node_angles=node_angles)
fig.canvas.mpl_connect('button_press_event', callback)
plt_show(show)
return fig, axes
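# A minimal usage sketch (hypothetical node names, not part of the public MNE
# API): build a random all-to-all connectivity matrix and plot it. The helper
# is never called here, so importing this module stays side-effect free.
def _example_connectivity_circle():
    rng = np.random.RandomState(42)
    names = ['lh-1', 'lh-2', 'lh-3', 'rh-1', 'rh-2', 'rh-3']
    con = rng.rand(len(names), len(names))
    node_angles = circular_layout(names, node_order=names,
                                  group_boundaries=[0, 3], group_sep=15)
    return plot_connectivity_circle(con, names, node_angles=node_angles,
                                    n_lines=10, show=False)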
|
bsd-3-clause
|
zlongshen/InertialNav
|
code/plot_glitchOffset.py
|
6
|
3806
|
#!/bin/python
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import numpy as np
import math
# timestamp, velN, velE, velD, posN, posE, posD, posOffN, posOffE, velOffN, velOffE
data = np.genfromtxt('GlitchOffsetOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'velN', 'velE', 'velD', 'posN', 'posE', 'posD', 'posOffN', 'posOffE', 'velOffN', 'velOffE'])
try:
data2 = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',
'Bx', 'By', 'Bz', 'Aoff', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd', 'dist'])
except ValueError:
try:
data2 = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',
'Bx', 'By', 'Bz', 'Aoff', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd'])
except ValueError:
data2 = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',
'Bx', 'By', 'Bz', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd'])
fig = plt.figure(10)
ax1 = fig.add_subplot(211)
ax1.set_title("GPS glitch offset")
ax1.set_ylabel('offset (m)')
ax1.plot(data['time'], data['posOffN'], color='b', label='posOffN')
ax1.plot(data['time'], data['posOffE'], color='r', label='posOffE')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles, labels, loc=2)
ax2 = fig.add_subplot(212)
ax2.set_xlabel('time (s)')
ax2.set_ylabel('offset (m/sec)')
ax2.plot(data['time'], data['velOffN'], color='b', label='velOffN')
ax2.plot(data['time'], data['velOffE'], color='r', label='velOffE')
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles, labels, loc=2)
plt.savefig('GPSglitchOffset.png', bbox_inches='tight')
fig = plt.figure(11)
ax1 = fig.add_subplot(311)
ax1.set_title("GPS pos vs EKF pos")
ax1.set_ylabel('(m)')
ax1.plot(data['time'], data['posN'], color='r', label='posN')
ax1.plot(data2['time'], data2['Pn'], color='b', label='Pn')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles, labels, loc=2)
ax2 = fig.add_subplot(312)
ax2.set_ylabel('(m)')
ax2.plot(data['time'], data['posE'], color='r', label='posE')
ax2.plot(data2['time'], data2['Pe'], color='b', label='Pe')
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles, labels, loc=2)
ax3 = fig.add_subplot(313)
ax3.set_xlabel('time (s)')
ax3.set_ylabel('(m)')
ax3.plot(data['time'], -data['posD'], color='r', label='posD')
ax3.plot(data2['time'], data2['Pd'], color='b', label='Pd')
handles, labels = ax3.get_legend_handles_labels()
ax3.legend(handles, labels, loc=2)
plt.savefig('GPSdata.png', bbox_inches='tight')
fig = plt.figure(12)
ax1 = fig.add_subplot(311)
ax1.set_title("GPS velNED vs. EKF vel")
ax1.set_ylabel('(m/sec)')
ax1.plot(data['time'], data['velN'], color='r', label='velN')
ax1.plot(data2['time'], data2['Vn'], color='b', label='Vn')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles, labels, loc=2)
ax2 = fig.add_subplot(312)
ax2.set_ylabel('(m/sec)')
ax2.plot(data['time'], data['velE'], color='r', label='velE')
ax2.plot(data2['time'], data2['Ve'], color='b', label='Ve')
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles, labels, loc=2)
ax3 = fig.add_subplot(313)
ax3.set_xlabel('time (s)')
ax3.set_ylabel('(m/sec)')
ax3.plot(data['time'], data['velD'], color='r', label='velD')
ax3.plot(data2['time'], data2['Vd'], color='b', label='Vd')
handles, labels = ax3.get_legend_handles_labels()
ax3.legend(handles, labels, loc=2)
plt.savefig('GPS_EKF_velNED.png', bbox_inches='tight')
plt.show()
|
bsd-3-clause
|
dsullivan7/scikit-learn
|
sklearn/preprocessing/label.py
|
13
|
28598
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible types are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute ``indicator_matrix_`` is deprecated and will be "
"removed in 0.17. Use ``y_type_ == 'multilabel-indicator'`` "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute ``multilabel_`` is deprecated and will be removed "
"in 0.17. Use ``y_type_.startswith('multilabel')`` "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
and represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, and represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
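# Worked example for the dense branch (hypothetical values): with
# classes = [1, 2, 6] and y = [[0.2, 0.9, 0.1], [0.8, 0.1, 0.4]], the row-wise
# argmax is [1, 0], so the function returns [2, 1]. The sparse branch above
# computes the same first argmax per row directly from the CSR
# indptr/indices/data arrays without densifying y.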
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
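# For example (hypothetical labels): transforming y = [(1, 2), (3,)] with the
# mapping {1: 0, 2: 1, 3: 2} yields indices = [0, 1, 2] and indptr = [0, 2, 3],
# so row 0 has nonzeros in columns 0 and 1 and row 1 in column 2, matching the
# indicator matrix shown in the class docstring.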
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
|
bsd-3-clause
|
mengli/PcmAudioRecorder
|
utils/udacity_data.py
|
2
|
2876
|
import scipy.misc
import random
import pandas as pd
import tensorflow as tf
#points to the end of the last batch
train_batch_pointer = 0
val_batch_pointer = 0
train_xs = []
train_ys = []
val_xs = []
val_ys = []
TRAIN_IMG_PREFIX = "/usr/local/google/home/limeng/Downloads/udacity/ch2_002/output/HMB_%s/"
TRAIN_CSV = "/usr/local/google/home/limeng/Downloads/udacity/ch2_002/output/HMB_%s/interpolated.csv"
VAL_IMG_PREFIX = "/usr/local/google/home/limeng/Downloads/udacity/test/HMB_3/"
VAL_CSV = "/usr/local/google/home/limeng/Downloads/udacity/test/HMB_3/interpolated.csv"
NUM_TRAIN_IMAGES = 33808
NUM_VAL_IMAGES = 5279
def read_csv(csv_file_name, img_prefix):
x_out = []
data_csv = pd.read_csv(csv_file_name)
data = data_csv[[x.startswith("center") for x in data_csv["filename"]]]
for file_name in data["filename"]:
x_out.append(img_prefix + file_name)
return x_out, data["angle"]
def read_data(shuffle=True):
global train_xs
global train_ys
global val_xs
global val_ys
# Read train set
for idx in range(1, 7):
if idx == 3:
continue
x_out, y_out = read_csv(TRAIN_CSV % idx, TRAIN_IMG_PREFIX % idx)
train_xs.extend(x_out)
train_ys.extend(y_out)
# Read val set
val_xs, val_ys = read_csv(VAL_CSV, VAL_IMG_PREFIX)
#shuffle train set
c = list(zip(train_xs, train_ys))
if shuffle:
random.shuffle(c)
# with open("train.txt", 'a') as out:
# for item in c:
# out.write("%s %.10f\n" % (item[0], item[1]))
train_xs, train_ys = zip(*c)
#shuffle val set
c = list(zip(val_xs, val_ys))
# with open("val.txt", 'a') as out:
# for item in c:
# out.write("%s %.10f\n" % (item[0], item[1]))
if shuffle:
random.shuffle(c)
val_xs, val_ys = zip(*c)
def load_train_batch(batch_size):
global train_batch_pointer
global train_xs
global train_ys
x_out = []
y_out = []
for i in range(0, batch_size):
image = scipy.misc.imread(train_xs[(train_batch_pointer + i) % NUM_TRAIN_IMAGES], mode="RGB")
x_out.append(scipy.misc.imresize(image[-300:], [66, 200]) / 255.0)
y_out.append([train_ys[(train_batch_pointer + i) % NUM_TRAIN_IMAGES]])
train_batch_pointer += batch_size
return x_out, y_out
def load_val_batch(batch_size):
global val_batch_pointer
global val_xs
global val_ys
x_out = []
y_out = []
for i in range(0, batch_size):
image = scipy.misc.imread(val_xs[(val_batch_pointer + i) % NUM_VAL_IMAGES], mode="RGB")
x_out.append(scipy.misc.imresize(image[-300:], [66, 200]) / 255.0)
y_out.append([val_ys[(val_batch_pointer + i) % NUM_VAL_IMAGES]])
val_batch_pointer += batch_size
return x_out, y_out
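# Both loaders index modulo the data set size, so the batch pointer wraps
# around transparently: e.g. with NUM_VAL_IMAGES = 5279 and batch_size = 100,
# the 53rd call runs past the end and reuses the first validation images.
# read_data() must be called beforehand so train_xs/val_xs are populated.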
def main(_):
read_data()
if __name__ == '__main__':
tf.app.run(main=main)
|
apache-2.0
|
liyu1990/sklearn
|
sklearn/metrics/tests/test_pairwise.py
|
17
|
25508
|
import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although the second arg is then spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
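    # The squared Euclidean distance expands as
    # ||x - y||^2 = ||x||^2 - 2*<x, y> + ||y||^2, which is why passing
    # precomputed {X,Y}_norm_squared lets euclidean_distances reuse the
    # norms and only evaluate the cross dot products.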
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
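    # The nested loop below spells out the closed forms being tested:
    # additive_chi2_kernel(x, y) = -sum_i (x_i - y_i)**2 / (x_i + y_i)
    # and chi2_kernel(x, y) = exp(gamma * additive_chi2_kernel(x, y)).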
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to the linear kernel when the
        # data has been L2-normalized.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
    # The modified tests are not 1D. In the old test, the array was
    # internally converted to 2D anyway
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
|
bsd-3-clause
|
nvoron23/statsmodels
|
statsmodels/examples/l1_demo/sklearn_compare.py
|
33
|
3710
|
"""
For comparison with sklearn.linear_model.LogisticRegression.

Computes a regularization path with both packages. The coefficient values in
either path are related by a "constant" in the sense that for any fixed
value of the constraint C and log likelihood, there exists an l1
regularization constant alpha such that the optimal solutions should be
the same. Note that alpha(C) is a nonlinear function in general. Here we
find alpha(C) by finding a reparameterization of the statsmodels path that
makes the paths match up. An equation is available, but to use it I would
need to hack the sklearn code to extract the gradient of the log
likelihood.
The results "prove" that the regularization paths are the same. Note that
finding the reparameterization is non-trivial since the coefficient paths
are NOT monotonic. As a result, the paths don't match up perfectly.
"""
from __future__ import print_function
from statsmodels.compat.python import range, lrange
from sklearn import linear_model
from sklearn import datasets
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
import pdb # pdb.set_trace
import sys
## Decide which dataset to use
# Use either spector or anes96
use_spector = False
#### Load data
## The Spector and Mazzeo (1980) data from statsmodels
if use_spector:
spector_data = sm.datasets.spector.load()
X = spector_data.exog
Y = spector_data.endog
else:
raise Exception(
"The anes96 dataset is now loaded in as a short version that cannot "\
"be used here")
anes96_data = sm.datasets.anes96.load_pandas()
Y = anes96_data.exog.vote
#### Fit and plot results
N = 200 # number of points to solve at
K = X.shape[1]
## Statsmodels
logit_mod = sm.Logit(Y, X)
sm_coeff = np.zeros((N, K)) # Holds the coefficients
if use_spector:
alphas = 1 / np.logspace(-1, 2, N) # for spector_data
else:
alphas = 1 / np.logspace(-3, 2, N) # for anes96_data
for n, alpha in enumerate(alphas):
logit_res = logit_mod.fit_regularized(
method='l1', alpha=alpha, disp=False, trim_mode='off')
sm_coeff[n,:] = logit_res.params
## Sklearn
sk_coeff = np.zeros((N, K))
if use_spector:
Cs = np.logspace(-0.45, 2, N)
else:
Cs = np.logspace(-2.6, 0, N)
for n, C in enumerate(Cs):
clf = linear_model.LogisticRegression(
C=C, penalty='l1', fit_intercept=False)
clf.fit(X, Y)
sk_coeff[n, :] = clf.coef_
## Get the reparametrization of sm_coeff that makes the paths equal
# Do this by finding one single re-parameterization of the second coefficient
# that makes the path for the second coefficient (almost) identical. This
# same parameterization will work for the other two coefficients since the
# regularization coefficients (in sk and sm) are related by a constant.
#
# special_X is chosen since this coefficient becomes non-zero before the
# other two...and is relatively monotonic...with both datasets.
sk_special_X = np.fabs(sk_coeff[:,2])
sm_special_X = np.fabs(sm_coeff[:,2])
s = np.zeros(N)
# Note that sk_special_X will not always be perfectly sorted...
s = np.searchsorted(sk_special_X, sm_special_X)
## Plot
plt.figure(2);plt.clf();plt.grid()
plt.xlabel('Index in sklearn simulation')
plt.ylabel('Coefficient value')
plt.title('Regularization Paths')
colors = ['b', 'r', 'k', 'g', 'm', 'c', 'y']
for coeff, name in [(sm_coeff, 'sm'), (sk_coeff, 'sk')]:
if name == 'sk':
ltype = 'x' # linetype
t = lrange(N) # The 'time' parameter
else:
ltype = 'o'
t = s
for i in range(K):
plt.plot(t, coeff[:,i], ltype+colors[i], label=name+'-X'+str(i))
plt.legend(loc='best')
plt.show()
|
bsd-3-clause
|
CPBridge/fetal_heart_analysis
|
scripts/analysis/particles_graphs.py
|
1
|
9909
|
#!/usr/bin/python
import matplotlib.pyplot as plt # plot
import matplotlib.cm as cm
import argparse as ap # parser for arguments
import CPBUtils as ut
from test_argparse import test_arg_parser
import numpy as np
intra_observer_colour = 'orange'
inter_observer_colour = 'magenta'
intra_observer_linestyle = inter_observer_linestyle = '--'
intra_observer_linewidth = inter_observer_linewidth = 3
parser = ap.ArgumentParser(description='Plot accuracy against time plots for classification/orientation/phase for different numbers of particles')
parser.add_argument('results_directory',help="directory containing the results, in folders according to model name")
parser.add_argument('--train_experiment_definitions','-r',nargs='+',help="file containing all of the training experiments to be displayed")
parser.add_argument('--test_experiment_definitions','-e',nargs='+',help="file containing all of the testing experiments to be displayed")
parser.add_argument('--filter_file_names','-z',nargs='+',help='results are found in a directory relating to the filterfile used with this name')
parser.add_argument('--output','-o',help="output summary to standard output",action="store_true")
parser.add_argument('--legend','-l',help="display the legend",action="store_true")
parser.add_argument('--annotations','-a',help="annotate the levels next to each point, may be very cluttered",action="store_true")
parser.add_argument('--summary_file_name','-s',help="name of the summary file to use within each directory",default="summary")
parser.add_argument('--inter_observer_annotation','-m',help="a summary file to use as an inter-rater annotation reference",default="none")
parser.add_argument('--intra_observer_annotation','-n',help="a summary file to use as an intra-rater annotation reference",default="none")
parser.add_argument('--structures_results','-S',help="also plot structures results",action="store_true")
parser.add_argument('--time_limits','-t',help="limits on the time axis for structure plot only",type=int,nargs=2)
parser.add_argument('--localisation_limits','-L',help="limits on the localisation axis",type=float,nargs=2)
parser.add_argument('--legend_entries','-k',help="how the models should appear in the legend",nargs='+')
#parser.add_argument('--no_display','-n',action='store_true',help='Supress displaying the plot')
#parser.add_argument('--write_pdf_stem','-w',help='Write a pdf of each plot with this stem')
args = parser.parse_args()
# Directory where results are stored
resultsdir = args.results_directory
if (len(args.train_experiment_definitions) != len(args.test_experiment_definitions)) or (len(args.train_experiment_definitions) != len(args.filter_file_names)):
print "ERROR: Number of arguments for train_experiment_definitions, test_experiment_definitions and filter_file_names must match"
exit()
# Load manual annotations if required
draw_inter_observer = args.inter_observer_annotation != "none"
if draw_inter_observer :
inter_observer_accuracy,_,inter_observer_ori_error,inter_observer_phase_error = ut.accuracyFromSummaryFile(args.inter_observer_annotation)
# Load manual annotations if required
draw_intra_observer = args.intra_observer_annotation != "none"
if draw_intra_observer :
intra_observer_accuracy,_,intra_observer_ori_error,intra_observer_phase_error = ut.accuracyFromSummaryFile(args.intra_observer_annotation)
# Detection Figure
detection_fig = plt.figure(figsize=(10,6))
plt.xlabel("Detection Error (%)",fontweight='bold', fontsize='large')
plt.ylabel("Average Time per Frame (ms)",fontweight='bold', fontsize='large')
plt.tick_params(axis='both', which='major', labelsize=16)
plt.grid(b=True, which='major', linestyle='--')
detection_fig.patch.set_facecolor('white')
detection_fig.tight_layout()
# Add manual accuracy lines
if draw_inter_observer :
plt.axvline(100.0*(1.0-inter_observer_accuracy),linestyle=inter_observer_linestyle,color=inter_observer_colour,lw=inter_observer_linewidth)
if draw_intra_observer :
plt.axvline(100.0*(1.0-intra_observer_accuracy),linestyle=intra_observer_linestyle,color=intra_observer_colour,lw=intra_observer_linewidth)
# Orientation Figure
ori_fig = plt.figure(figsize=(10,6))
plt.xlabel("Orientation Error",fontweight='bold', fontsize='large')
plt.ylabel("Average Time per Frame (ms)",fontweight='bold', fontsize='large')
plt.tick_params(axis='both', which='major', labelsize=16)
plt.grid(b=True, which='major', linestyle='--')
# Add manual accuracy lines
if draw_inter_observer :
plt.axvline(inter_observer_ori_error,linestyle=inter_observer_linestyle,color=inter_observer_colour,lw=inter_observer_linewidth)
if draw_intra_observer :
plt.axvline(intra_observer_ori_error,linestyle=intra_observer_linestyle,color=intra_observer_colour,lw=intra_observer_linewidth)
ori_fig.patch.set_facecolor('white')
ori_fig.tight_layout()
# Phase figure
phase_fig = plt.figure(figsize=(10,6))
plt.xlabel("Cardiac Phase Error",fontweight='bold', fontsize='large')
plt.ylabel("Average Time per Frame (ms)",fontweight='bold', fontsize='large')
plt.tick_params(axis='both', which='major', labelsize=16)
plt.grid(b=True, which='major', linestyle='--')
# Add a manual accuracy line
if draw_inter_observer :
plt.axvline(inter_observer_phase_error,linestyle=inter_observer_linestyle,color=inter_observer_colour,lw=inter_observer_linewidth)
if draw_intra_observer :
plt.axvline(intra_observer_phase_error,linestyle=intra_observer_linestyle,color=intra_observer_colour,lw=intra_observer_linewidth)
phase_fig.patch.set_facecolor('white')
phase_fig.tight_layout()
# Structures figure
if args.structures_results:
structs_fig = plt.figure(figsize=(10,6))
plt.xlabel("Localisation Error",fontweight='bold', fontsize='large')
plt.ylabel("Average Time per Frame (ms)",fontweight='bold', fontsize='large')
plt.tick_params(axis='both', which='major', labelsize=16)
plt.grid(b=True, which='major', linestyle='--')
structs_fig.patch.set_facecolor('white')
structs_fig.tight_layout()
# Loop over the train and test files
for train_model_file, test_model_file, filter_name in zip(args.train_experiment_definitions,args.test_experiment_definitions,args.filter_file_names):
train_model_list, test_model_list, time_table, accuracy_table, ori_error_table, phase_error_table = ut.gatherAccuracyStatsTrainTest(train_model_file,test_model_file,resultsdir,summary_file_name=args.summary_file_name,transpose=True,filtername=filter_name)
    # Can only process training experiment files with one training experiment
if len(train_model_list) > 1:
print "ERROR: Training experiment file", train_model_file , "contains multiple experiments"
exit()
# Get the lists for the first and only training experiment
train_model = train_model_list[0].replace('square_','rec_')
legend_entry = train_model if args.legend_entries is None else args.legend_entries[args.test_experiment_definitions.index(test_model_file)]
time_list = time_table[0]
accuracy_list = accuracy_table[0]
ori_error_list = ori_error_table[0]
phase_error_list = phase_error_table[0]
# Read in structures results
if args.structures_results:
struct_dist_table,_,_ = ut.getStructureDataTrainTest(train_model_file,test_model_file,filter_name,resultsdir,summary_file_name=args.summary_file_name,transpose=True)
struct_dist_list = struct_dist_table[0]
# Read in the test arguments
with open(test_model_file,'r') as test_file:
test_args = [line.split()[1:] for line in test_file]
# Get a list of the tree numbers in each model
particles_list = [test_arg_parser.parse_args(a).n_particles for a in test_args]
# Detection Error
plt.figure(detection_fig.number)
detection_error = [ 100.0*(1.0-x) for x in accuracy_list]
plt.plot(detection_error,time_list,'-o',label=legend_entry)
for x,y,p in zip(detection_error,time_list,particles_list):
plt.annotate(str(p),
xy=(x, y), xytext=(40, 20),
textcoords='offset points', ha='right', va='bottom',fontsize='small',
arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
# Orientation Error
plt.figure(ori_fig.number)
plt.plot(ori_error_list,time_list,'-o',label=legend_entry)
for x,y,p in zip(ori_error_list,time_list,particles_list):
plt.annotate(str(p),
xy=(x, y), xytext=(40, 20),
textcoords='offset points', ha='right', va='bottom',fontsize='small',
arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
# Cardiac Phase Error
plt.figure(phase_fig.number)
plt.plot(phase_error_list,time_list,'-o',label=legend_entry)
for x,y,p in zip(phase_error_list,time_list,particles_list):
plt.annotate(str(p),
xy=(x, y), xytext=(40, 20),
textcoords='offset points', ha='right', va='bottom',fontsize='small',
arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
# Structure Localisation
if args.structures_results:
plt.figure(structs_fig.number)
# Choose which colours to use for each structure
colourindex = np.linspace(0.0,1.0,num=len(struct_dist_list[0]))
colours = cm.rainbow(colourindex)
for struct,c in zip(struct_dist_list[0],colours) :
this_struct_dist_list = [dist_dict[struct] for dist_dict in struct_dist_list]
plt.plot(this_struct_dist_list,time_list,'-o',label=struct,color=c)
        # Choose the structure with the lowest localisation error as the
        # x location for the particle-count annotations
annotate_x_points = [d[min(d,key=d.get)] for d in struct_dist_list]
for x,y,p in zip(annotate_x_points,time_list,particles_list):
plt.annotate(str(p),
xy=(x, y), xytext=(-40, 0),
textcoords='offset points', ha='right', va='center',fontsize='small',
arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
if args.time_limits is not None:
plt.ylim(args.time_limits)
if args.localisation_limits is not None:
plt.xlim(args.localisation_limits)
plt.legend(fontsize='small')
# Add legends
for num in [detection_fig.number,ori_fig.number,phase_fig.number]:
plt.figure(num)
plt.legend()
plt.show()
|
gpl-3.0
|
wateraccounting/SEBAL
|
hants_old/wa_gdal/main.py
|
1
|
17674
|
# -*- coding: utf-8 -*-
"""
Authors: Gonzalo E. Espinoza-Dávalos, Wim G.M. Bastiaanssen, Boaz Bett, and
Xueliang Cai
IHE Delft 2017
Contact: [email protected]
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import math
from .davgis.functions import (Spatial_Reference, List_Datasets, Clip,
Resample, Raster_to_Array, NetCDF_to_Raster)
import os
import tempfile
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta,
epsg=4326, fill_val=-9999.0,
rasters_path_out=None, export_hants_only=False):
'''
This function runs the python implementation of the HANTS algorithm. It
    takes a folder of geotiff raster data as input, creates a netcdf
    file, and optionally exports the data back to geotiffs.
'''
create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg, fill_val)
HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val)
#if rasters_path_out:
#export_tiffs(rasters_path_out, nc_path, name_format, export_hants_only)
return nc_path
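# Minimal usage sketch for run_HANTS (hypothetical paths and parameter values,
# shown only to illustrate the call signature defined above):
#
#   nc_out = run_HANTS(
#       rasters_path_inp='/data/ndvi_tiffs',    # hypothetical input folder
#       name_format='ndvi_{0}.tif',             # dates formatted as YYYYMMDD
#       start_date='2016-01-01', end_date='2016-12-31',
#       latlim=[-1.0, 1.0], lonlim=[35.0, 37.0], cellsize=0.01,
#       nc_path='/data/ndvi_hants.nc',          # hypothetical output netcdf
#       nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
#       fet=0.05, dod=1, delta=0.25)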
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg=4326, fill_val=-9999.0):
'''
    This function creates a netcdf file from a folder of geotiff rasters to
be used to run HANTS.
'''
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = pd.date_range(start_date, end_date, freq='D')
dates_ls = [d.strftime('%Y%m%d') for d in dates_dt]
ras_ls = List_Datasets(rasters_path, 'tif')
# Cell code
temp_ll_ls = [pd.np.arange(x, x + lon_n)
for x in range(1, lat_n*lon_n, lon_n)]
code_ls = pd.np.array(temp_ll_ls)
empty_vec = pd.np.empty((lat_n, lon_n))
empty_vec[:] = fill_val
# Create netcdf file
print('Creating netCDF file...')
nc_file = netCDF4.Dataset(nc_path, 'w', format="NETCDF4")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_n)
lon_dim = nc_file.createDimension('longitude', lon_n)
time_dim = nc_file.createDimension('time', len(dates_ls))
# Create Variables
crs_var = nc_file.createVariable('crs', 'i4')
crs_var.grid_mapping_name = 'latitude_longitude'
crs_var.crs_wkt = spa_ref
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude'),
fill_value=fill_val)
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude'),
fill_value=fill_val)
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time'),
fill_value=fill_val)
time_var.standard_name = 'time'
time_var.calendar = 'gregorian'
code_var = nc_file.createVariable('code', 'i4', ('latitude', 'longitude'),
fill_value=fill_val)
outliers_var = nc_file.createVariable('outliers', 'i4',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
outliers_var.long_name = 'outliers'
original_var = nc_file.createVariable('original_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
original_var.long_name = 'original values'
hants_var = nc_file.createVariable('hants_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
hants_var.long_name = 'hants values'
combined_var = nc_file.createVariable('combined_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
combined_var.long_name = 'combined values'
print('\tVariables created')
# Load data
lat_var[:] = lat_ls
lon_var[:] = lon_ls
time_var[:] = dates_ls
code_var[:] = code_ls
# temp folder
temp_dir = tempfile.mkdtemp()
bbox = [lonlim[0], latlim[0], lonlim[1], latlim[1]]
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(len(dates_ls)):
# Raster
ras = name_format.format(dates_ls[tt])
if ras in ras_ls:
# Resample
ras_resampled = os.path.join(temp_dir, 'r_' + ras)
Resample(os.path.join(rasters_path, ras), ras_resampled, cellsize)
# Clip
ras_clipped = os.path.join(temp_dir, 'c_' + ras)
Clip(ras_resampled, ras_clipped, bbox)
# Raster to Array
array = Raster_to_Array(ras_resampled,
ll_corner, lon_n, lat_n,
values_type='float32')
# Store values
original_var[:, :, tt] = array
else:
# Store values
original_var[:, :, tt] = empty_vec
# Close file
nc_file.close()
print('NetCDF file created')
# Return
return nc_path
def HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netcdf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
# Read netcdfs
nc_file = netCDF4.Dataset(nc_path, 'r+')
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[rows, cols, ztime] = original_values.shape
size_st = cols*rows
values_hants = pd.np.empty((rows, cols, ztime))
outliers_hants = pd.np.empty((rows, cols, ztime))
values_hants[:] = pd.np.nan
outliers_hants[:] = pd.np.nan
# Additional parameters
ni = len(time_var)
ts = range(ni)
# Loop
counter = 1
print('Running HANTS...')
for m in range(rows):
for n in range(cols):
print('\t{0}/{1}'.format(counter, size_st))
y = pd.np.array(original_values[m, n, :])
y[pd.np.isnan(y)] = fill_val
[yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
low, high, fet, dod, delta, fill_val)
values_hants[m, n, :] = yr
outliers_hants[m, n, :] = outliers
counter = counter + 1
nc_file.variables['hants_values'][:] = values_hants
nc_file.variables['outliers'][:] = outliers_hants
nc_file.variables['combined_values'][:] = pd.np.where(outliers_hants,
values_hants,
original_values)
# Close netcdf file
nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
delta, fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm for a
single point (lat, lon). It plots the fit and returns a data frame with
the 'original' and the 'hants' time series.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r')
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netcdf file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
lat_i = pd.np.where(lat == lat_closest)[0][0]
lon_i = pd.np.where(lon == lon_closest)[0][0]
# Read values
original_values = nc_file.variables['original_values'][lat_i, lon_i, :]
# Additional parameters
ni = len(time)
ts = range(ni)
# HANTS
y = pd.np.array(original_values)
y[pd.np.isnan(y)] = fill_val
[hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
dod, delta, fill_val)
# Plot
top = 1.15*max(pd.np.nanmax(original_values),
pd.np.nanmax(hants_values))
bottom = 1.15*min(pd.np.nanmin(original_values),
pd.np.nanmin(hants_values))
ylim = [bottom, top]
plt.plot(time, hants_values, 'r-', label='HANTS')
plt.plot(time, original_values, 'b.', label='Original data')
plt.ylim(ylim[0], ylim[1])
plt.legend(loc=4)
plt.xlabel('time')
plt.ylabel('values')
plt.gcf().autofmt_xdate()
plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.format(lon_closest,
lat_closest))
plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
plt.show()
# Close netcdf file
nc_file.close()
# Data frame
df = pd.DataFrame({'time': time,
'original': original_values,
'hants': hants_values})
# Return
return df
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta, fill_val):
'''
This function applies the Harmonic ANalysis of Time Series (HANTS)
algorithm originally developed by the Netherlands Aerospace Centre (NLR)
(http://www.nlr.org/space/earth-observation/).
This python implementation was based on two previous implementations
available at the following links:
https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-series-applied-to-arrays
http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-series--hants-
'''
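    # The design matrix built below encodes the truncated Fourier series that
    # HANTS fits to the series y at sample times ts:
    #
    #   yr(t_j) = a_0 + sum_{i=1..nf} [a_i*cos(2*pi*i*t_j/nb)
    #                                  + b_i*sin(2*pi*i*t_j/nb)]
    #
    # Points are then rejected iteratively on the side selected by HiLo until
    # the largest deviation is below fet or the outlier budget is exhausted.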
# Arrays
mat = pd.np.zeros((min(2*nf+1, ni), ni))
# amp = np.zeros((nf + 1, 1))
# phi = np.zeros((nf+1, 1))
yr = pd.np.zeros((ni, 1))
outliers = pd.np.zeros((1, len(y)))
# Filter
sHiLo = 0
if HiLo == 'Hi':
sHiLo = -1
elif HiLo == 'Lo':
sHiLo = 1
nr = min(2*nf+1, ni)
noutmax = ni - nr - dod
# dg = 180.0/math.pi
mat[0, :] = 1.0
ang = 2*math.pi*pd.np.arange(nb)/nb
cs = pd.np.cos(ang)
sn = pd.np.sin(ang)
i = pd.np.arange(1, nf+1)
for j in pd.np.arange(ni):
index = pd.np.mod(i*ts[j], nb)
mat[2 * i-1, j] = cs.take(index)
mat[2 * i, j] = sn.take(index)
p = pd.np.ones_like(y)
bool_out = (y < low) | (y > high)
p[bool_out] = 0
outliers[bool_out.reshape(1, y.shape[0])] = 1
nout = pd.np.sum(p == 0)
if nout > noutmax:
if pd.np.isclose(y, fill_val).any():
ready = pd.np.array([True])
yr = y
outliers = pd.np.zeros((y.shape[0]), dtype=int)
outliers[:] = fill_val
else:
raise Exception('Not enough data points.')
else:
ready = pd.np.zeros((y.shape[0]), dtype=bool)
nloop = 0
nloopmax = ni
while ((not ready.all()) & (nloop < nloopmax)):
nloop += 1
za = pd.np.matmul(mat, p*y)
A = pd.np.matmul(pd.np.matmul(mat, pd.np.diag(p)),
pd.np.transpose(mat))
A = A + pd.np.identity(nr)*delta
A[0, 0] = A[0, 0] - delta
zr = pd.np.linalg.solve(A, za)
yr = pd.np.matmul(pd.np.transpose(mat), zr)
diffVec = sHiLo*(yr-y)
err = p*diffVec
err_ls = list(err)
err_sort = deepcopy(err)
err_sort.sort()
rankVec = [err_ls.index(f) for f in err_sort]
maxerr = diffVec[rankVec[-1]]
ready = (maxerr <= fet) | (nout == noutmax)
if (not ready):
i = ni - 1
j = rankVec[i]
while ((p[j]*diffVec[j] > 0.5*maxerr) & (nout < noutmax)):
p[j] = 0
outliers[0, j] = 1
nout += 1
i -= 1
if i == 0:
j = 0
else:
j = 1
return [yr, outliers]
def export_tiffs(rasters_path_out, nc_path, name_format,
export_hants_only=False):
'''
This function exports the output of the HANTS analysis.
If 'export_hants_only' is False (default), the output rasters have the best
value available. Therefore, the cells in the output rasters will have the
original value for the cells that are not outliers and the hants values for
the cells that are outliers or the cells where data is not available.
If 'export_hants_only' is True, the exported rasters have the values
    obtained by the HANTS algorithm, regardless of the original values.
'''
# Print
print('Exporting...')
# Create folders
if not os.path.exists(rasters_path_out):
os.makedirs(rasters_path_out)
# Read time data
nc_file = netCDF4.Dataset(nc_path, 'r')
time_var = nc_file.variables['time'][:]
nc_file.close()
# Output type
if export_hants_only:
variable_selected = 'hants_values'
else:
variable_selected = 'combined_values'
# Loop through netcdf file
for yyyymmdd in time_var:
print('\t{0}'.format(yyyymmdd))
output_name = rasters_path_out + os.sep + name_format.format(yyyymmdd)
NetCDF_to_Raster(input_nc=nc_path, output_tiff=output_name,
ras_variable=variable_selected,
x_variable='longitude', y_variable='latitude',
crs={'variable': 'crs', 'wkt': 'crs_wkt'},
time={'variable': 'time', 'value': yyyymmdd})
# Return
print('Done')
return rasters_path_out
def plot_point(nc_path, point, ylim=None):
'''
This function plots the original time series and the HANTS time series.
It can be used to assess the fit.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r')
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netcdf file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
lat_i = pd.np.where(lat == lat_closest)[0][0]
lon_i = pd.np.where(lon == lon_closest)[0][0]
# Read values
values_o = nc_file.variables['original_values'][lat_i, lon_i, :]
values_h = nc_file.variables['hants_values'][lat_i, lon_i, :]
if not ylim:
top = 1.15*max(pd.np.nanmax(values_o),
pd.np.nanmax(values_h))
bottom = 1.15*min(pd.np.nanmin(values_o),
pd.np.nanmin(values_h))
ylim = [bottom, top]
# Plot
plt.plot(time, values_h, 'r-', label='HANTS')
plt.plot(time, values_o, 'b.', label='Original data')
plt.ylim(ylim[0], ylim[1])
plt.legend(loc=4)
plt.xlabel('time')
plt.ylabel('values')
plt.gcf().autofmt_xdate()
plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.format(lon_closest,
lat_closest))
plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
plt.show()
# Close netcdf file
nc_file.close()
# Return
return True
def makediag3d(M):
'''
Computing diagonal for each row of a 2d array.
Reference: http://stackoverflow.com/q/27214027/2459096
'''
b = pd.np.zeros((M.shape[0], M.shape[1]*M.shape[1]))
b[:, ::M.shape[1]+1] = M
# Return
return b.reshape(M.shape[0], M.shape[1], M.shape[1])
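# Worked example (illustrative): makediag3d(np.array([[1, 2], [3, 4]])) returns
# an array of shape (2, 2, 2) holding one diagonal matrix per row of the input:
#   [[[1, 0], [0, 2]],
#    [[3, 0], [0, 4]]]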
|
apache-2.0
|
waldol1/BYU-AWESOME
|
scripts/prediction/binary_prediction.py
|
1
|
9158
|
#!/usr/bin/python
import os
import sys
import collections
import argparse
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
plt.ioff()
import numpy as np
import caffe
import cv2
import scipy.ndimage.morphology
LEFT_EDGE = -2
TOP_EDGE = -1
MIDDLE = 0
RIGHT_EDGE = 1
BOTTOM_EDGE = 2
def safe_mkdir(_dir):
try:
os.makedirs(_dir)
except:
pass
def setup_network(args):
network = caffe.Net(args.net_file, args.weights_file, caffe.TEST)
if args.gpu >= 0:
caffe.set_mode_gpu()
caffe.set_device(args.gpu)
else:
caffe.set_mode_cpu()
return network
def fprop(network, ims, output_blobs, args):
idx = 0
responses = collections.defaultdict(list)
while idx < len(ims):
sub_ims = ims[idx:idx+args.batch_size]
network.blobs["data"].reshape(len(sub_ims), ims[0].shape[2], ims[0].shape[0], ims[0].shape[1])
for x, im in enumerate(sub_ims):
transposed = np.transpose(im, [2,0,1])
transposed = transposed[np.newaxis, :, :, :]
network.blobs["data"].data[x,:,:,:] = transposed
idx += args.batch_size
# propagate on batch
network.forward()
for layer_name, blob_name in output_blobs:
output = np.copy(network.blobs[blob_name].data)
#print layer_name, blob_name, output.min(), output.max()
responses[layer_name].append(output)
print "Progress %d%%" % int(100 * min(idx, len(ims)) / float(len(ims)))
#return np.concatenate(responses, axis=0)
return {key: np.squeeze(np.concatenate(value, axis=0), axis=1) for key, value in responses.iteritems()}
def predict(network, ims, output_blobs, args):
raw_outputs = fprop(network, ims, output_blobs, args)
thresholded_outputs = dict()
for key, raw_output in raw_outputs.iteritems():
high_indices = raw_output >= args.threshold
predictions = np.zeros_like(raw_output)
predictions[high_indices] = 1
thresholded_outputs[key] = predictions
return raw_outputs, thresholded_outputs
def get_subwindows(im, pad_size, tile_size):
height, width = tile_size, tile_size
y_stride, x_stride = tile_size - (2 * pad_size), tile_size - (2 * pad_size)
if (height > im.shape[0]) or (width > im.shape[1]):
print "Invalid crop: crop dims larger than image (%r with %r)" % (im.shape, (height, width))
exit(1)
ims = list()
locations = list()
y = 0
y_done = False
while y <= im.shape[0] and not y_done:
x = 0
if y + height > im.shape[0]:
y = im.shape[0] - height
y_done = True
x_done = False
while x <= im.shape[1] and not x_done:
if x + width > im.shape[1]:
x = im.shape[1] - width
x_done = True
locations.append( ((y, x, y + height, x + width),
(y + pad_size, x + pad_size, y + y_stride, x + x_stride),
TOP_EDGE if y == 0 else (BOTTOM_EDGE if y == (im.shape[0] - height) else MIDDLE),
LEFT_EDGE if x == 0 else (RIGHT_EDGE if x == (im.shape[1] - width) else MIDDLE)
) )
ims.append(im[y:y+height,x:x+width])
x += x_stride
y += y_stride
return locations, ims
def stich_together(locations, subwindows, size, dtype, pad_size, tile_size):
output = np.zeros(size, dtype=dtype)
for location, subwindow in zip(locations, subwindows):
outer_bounding_box, inner_bounding_box, y_type, x_type = location
y_paste, x_paste, y_cut, x_cut, height_paste, width_paste = -1, -1, -1, -1, -1, -1
if y_type == TOP_EDGE:
y_cut = 0
y_paste = 0
height_paste = tile_size - pad_size
elif y_type == MIDDLE:
y_cut = pad_size
y_paste = inner_bounding_box[0]
height_paste = tile_size - 2 * pad_size
elif y_type == BOTTOM_EDGE:
y_cut = pad_size
y_paste = inner_bounding_box[0]
height_paste = tile_size - pad_size
if x_type == LEFT_EDGE:
x_cut = 0
x_paste = 0
width_paste = tile_size - pad_size
elif x_type == MIDDLE:
x_cut = pad_size
x_paste = inner_bounding_box[1]
width_paste = tile_size - 2 * pad_size
elif x_type == RIGHT_EDGE:
x_cut = pad_size
x_paste = inner_bounding_box[1]
width_paste = tile_size - pad_size
output[y_paste:y_paste+height_paste, x_paste:x_paste+width_paste] = subwindow[y_cut:y_cut+height_paste, x_cut:x_cut+width_paste]
return output
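# Minimal sanity-check sketch for the two helpers above (hypothetical sizes;
# assumes the image is at least tile_size in each dimension). Tiling an image
# and stitching the tiles back together should reproduce it exactly:
#
#   im = np.random.randint(0, 255, (700, 900)).astype(np.uint8)
#   locs, tiles = get_subwindows(im, pad_size=96, tile_size=256)
#   out = stich_together(locs, tiles, im.shape, im.dtype, 96, 256)
#   assert (out == im).all()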
def save_histo(data, fname, title, weights=None):
if weights is not None:
weights = weights.flatten()
n, bins, patches = plt.hist(data.flatten(), bins=100, weights=weights, log=True)
plt.title(title)
plt.xlabel('Predicted Probability of Foreground')
plt.ylabel('Pixel Count')
plt.tick_params(axis='y', which='minor', left='off', right='off')
plt.savefig(fname)
plt.clf()
def xor_image(im1, im2):
out_image = np.zeros(im1.shape + (3,))
for y in xrange(im1.shape[0]):
for x in xrange(im1.shape[1]):
if im1[y,x]:
if im2[y,x]:
# white on white
out_image[y,x] = (255,255,255)
else:
# white on black
out_image[y,x] = (255,0,0)
else:
if im2[y,x]:
# black on white
out_image[y,x] = (0,255,0)
else:
# black on black
out_image[y,x] = (0,0,0)
return out_image
def get_ims_files(args):
im_files = map(lambda s: s.strip(), open(args.image_manifest, 'r').readlines())
im_dirs = args.im_dirs.split(',')
return im_files, im_dirs
def load_im(im_file, im_dirs, args):
ims = list()
for im_dir in im_dirs:
im_path = os.path.join(args.dataset_dir, im_dir, im_file)
im = cv2.imread(im_path, -1) # reads in image as is
if im is None:
raise Exception("File does not exist: %s" % im_path)
if im.ndim == 2:
im = im[:,:,np.newaxis]
ims.append(im)
im = np.concatenate(ims, axis=2)
im = im - args.mean
im = args.scale * im
return im
def write_output(locations, raw_subwindows, binarized_subwindows, im_file, image, pad_size, im, args):
for key in raw_subwindows.keys():
binary_result = stich_together(locations, binarized_subwindows[key], tuple(image.shape[0:2]),
np.uint8, pad_size, args.tile_size)
binary_result = 255 * binary_result
binary_out_file = os.path.join(args.out_dir, 'binary', key, im_file)
cv2.imwrite(binary_out_file, binary_result)
# TODO: fix this
raw_result = stich_together(locations, raw_subwindows[key], tuple(image.shape[0:2]),
np.float, pad_size, args.tile_size)
raw_result = (255 * raw_result).astype(np.uint8)
raw_out_file = os.path.join(args.out_dir, 'raw', key, im_file)
cv2.imwrite(raw_out_file, raw_result)
def get_output_blobs(f):
output_blobs = list()
lines = open(f).readlines()
for idx, line in enumerate(lines):
if 'Sigmoid' in line:
# layer name is meaningful
layer_name = lines[idx-1].split()[-1].strip('"')
# top name is autogenerated, but what the network recognizes
top_name = lines[idx+2].split()[-1].strip('"')
output_blobs.append( (layer_name, top_name) )
return output_blobs
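# For reference, a hypothetical deploy.prototxt fragment consistent with the
# index arithmetic in get_output_blobs above (layer name on the line before
# 'Sigmoid', top blob name two lines after it):
#
#   layer {
#     name: "binarize_text"   # -> lines[idx - 1]
#     type: "Sigmoid"         # -> matched line at idx
#     bottom: "text_score"
#     top: "Sigmoid1"         # -> lines[idx + 2]
#   }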
def main(args):
output_blobs = get_output_blobs(args.net_file)
for layer_name, _ in output_blobs:
safe_mkdir(os.path.join(args.out_dir, 'raw', layer_name))
safe_mkdir(os.path.join(args.out_dir, 'binary', layer_name))
network = setup_network(args)
im_files, im_dirs = get_ims_files(args)
pad_size = args.pad
for idx, im_file in enumerate(im_files):
image = load_im(im_file, im_dirs, args)
if idx == 0:
print image.shape
if idx and idx % args.print_count == 0:
print "Processed %d/%d Images" % (idx, len(im_files))
locations, subwindows = get_subwindows(image, pad_size, args.tile_size)
raw_subwindows, binarized_subwindows = predict(network, subwindows, output_blobs, args)
write_output(locations, raw_subwindows, binarized_subwindows, im_file, image, pad_size, image, args)
def get_args():
parser = argparse.ArgumentParser(description="Outputs binary predictions")
parser.add_argument("net_file",
help="The deploy.prototxt")
parser.add_argument("weights_file",
help="The weights.caffemodel")
parser.add_argument("dataset_dir",
help="The dataset to be evaluated")
parser.add_argument("image_manifest",
help="txt file listing images to evaluate")
parser.add_argument("out_dir",
help="output directory")
parser.add_argument("--gpu", type=int, default=0,
help="GPU to use for running the network")
parser.add_argument("-m", "--mean", type=float, default=127.,
help="Mean value for data preprocessing")
parser.add_argument("-a", "--scale", type=float, default=0.0039,
help="Optional scale factor")
parser.add_argument("--print-count", default=1, type=int,
help="Print every print-count images processed")
parser.add_argument("-b", "--batch-size", default=4, type=int,
help="Max number of transforms in single batch per original image")
parser.add_argument("-p", "--pad", default=96, type=int,
help="Padding size")
parser.add_argument("-t", "--tile-size", default=256, type=int,
help="Size of tiles to extract")
parser.add_argument("--im-dirs", default='original_images', type=str,
help="comma separated list of input images to the network")
parser.add_argument("--threshold", default=0.5, type=float,
help="Probability threshold for foreground/background")
#parser.add_argument("-v", "--verbose", default=False, action='store_true',
# help="Write auxiliary images for analysis")
args = parser.parse_args()
print args
return args
if __name__ == "__main__":
args = get_args()
main(args)
|
gpl-3.0
|
mriosb08/palodiem-QE
|
src/testReg.py
|
1
|
1095
|
import sys
from math import sqrt
from sklearn.metrics import accuracy_score, mean_absolute_error, mean_squared_error
from scipy.stats.mstats import mquantiles
import scipy as sp
import numpy as np
def main(args):
(ftrue, fpred) = args
y_true = [float(line.strip()) for line in open(ftrue)]
y_pred = [float(line.strip()) for line in open(fpred)]
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
mae = mean_absolute_error(y_true, y_pred)
mse = mean_squared_error(y_true, y_pred)
print 'MAE: ', mae
print 'RMSE: ', sqrt(mse)
print 'pearson:', sp.stats.pearsonr(y_true, y_pred)[0]
print 'spearman', sp.stats.spearmanr(y_true, y_pred)[0]
print 'r-squared', sp.stats.linregress(y_true, y_pred)[0]
print 'true: ', mquantiles(y_true, prob=[0.1,0.9])
print 'pred: ', mquantiles(y_pred, prob=[0.1,0.9])
print 'resid_mean: ', np.mean(y_true - y_pred)
return
if __name__ == '__main__':
if len(sys.argv) != 3:
print 'usage:python testReg.py <y-true> <y-predicted>'
sys.exit(1)
else:
main(sys.argv[1:])
|
apache-2.0
|
jmargeta/scikit-learn
|
sklearn/utils/tests/test_random.py
|
20
|
3872
|
from __future__ import division
import numpy as np
from scipy.misc import comb as combinations
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
|
bsd-3-clause
|
mo-g/iris
|
docs/iris/example_code/General/polynomial_fit.py
|
7
|
1443
|
"""
Fitting a polynomial
====================
This example demonstrates computing a polynomial fit to 1D data from an Iris
cube, adding the fit to the cube's metadata, and plotting both the 1D data and
the fit.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.quickplot as qplt
def main():
fname = iris.sample_data_path('A1B_north_america.nc')
cube = iris.load_cube(fname)
# Extract a single time series at a latitude and longitude point.
location = next(cube.slices(['time']))
# Calculate a polynomial fit to the data at this time series.
x_points = location.coord('time').points
y_points = location.data
degree = 2
p = np.polyfit(x_points, y_points, degree)
y_fitted = np.polyval(p, x_points)
# Add the polynomial fit values to the time series to take
# full advantage of Iris plotting functionality.
long_name = 'degree_{}_polynomial_fit_of_{}'.format(degree, cube.name())
fit = iris.coords.AuxCoord(y_fitted, long_name=long_name,
units=location.units)
location.add_aux_coord(fit, 0)
qplt.plot(location.coord('time'), location, label='data')
qplt.plot(location.coord('time'),
location.coord(long_name),
'g-', label='polynomial fit')
plt.legend(loc='best')
plt.title('Trend of US air temperature over time')
qplt.show()
if __name__ == '__main__':
main()
|
gpl-3.0
|
rs2/pandas
|
pandas/_config/display.py
|
3
|
1756
|
"""
Unopinionated display configuration.
"""
import locale
import sys
from pandas._config import config as cf
# -----------------------------------------------------------------------------
# Global formatting options
_initial_defencoding = None
def detect_console_encoding() -> str:
"""
Try to find the most capable encoding supported by the console.
Slightly modified from the way IPython handles the same issue.
"""
global _initial_defencoding
encoding = None
try:
encoding = sys.stdout.encoding or sys.stdin.encoding
except (AttributeError, OSError):
pass
# try again for something better
if not encoding or "ascii" in encoding.lower():
try:
encoding = locale.getpreferredencoding()
except locale.Error:
# can be raised by locale.setlocale(), which is
# called by getpreferredencoding
# (on some systems, see stdlib locale docs)
pass
# When all else fails, this will usually be "ascii".
if not encoding or "ascii" in encoding.lower():
encoding = sys.getdefaultencoding()
# GH#3360, save the reported defencoding at import time
# MPL backends may change it. Make available for debugging.
if not _initial_defencoding:
_initial_defencoding = sys.getdefaultencoding()
return encoding
pc_encoding_doc = """
: str/unicode
Defaults to the detected encoding of the console.
Specifies the encoding to be used for strings returned by to_string;
these are generally strings meant to be displayed on the console.
"""
with cf.config_prefix("display"):
cf.register_option(
"encoding", detect_console_encoding(), pc_encoding_doc, validator=cf.is_text
)
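# A minimal sketch of reading the option registered above (illustrative only and
# guarded so it never runs on import; it assumes the config accessor exposed by
# pandas._config.config):
if __name__ == "__main__":
    # "display.encoding" resolves to the value from detect_console_encoding().
    print(cf.get_option("display.encoding"))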
|
bsd-3-clause
|
huongttlan/statsmodels
|
examples/python/regression_plots.py
|
33
|
9585
|
## Regression Plots
from __future__ import print_function
from statsmodels.compat import lzip
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.formula.api import ols
### Duncan's Prestige Dataset
#### Load the Data
# We can use a utility function to load any R dataset available from the great <a href="http://vincentarelbundock.github.com/Rdatasets/">Rdatasets package</a>.
prestige = sm.datasets.get_rdataset("Duncan", "car", cache=True).data
prestige.head()
prestige_model = ols("prestige ~ income + education", data=prestige).fit()
print(prestige_model.summary())
#### Influence plots
# Influence plots show the (externally) studentized residuals vs. the leverage of each observation as measured by the hat matrix.
#
# Externally studentized residuals are residuals that are scaled by their standard deviation where
#
# $$var(\hat{\epsilon}_i)=\hat{\sigma}^2_i(1-h_{ii})$$
#
# with
#
# $$\hat{\sigma}^2_i=\frac{1}{n - p - 1}\sum_{j \neq i}^{n}\hat{\epsilon}_j^2$$
#
# $n$ is the number of observations and $p$ is the number of regressors. $h_{ii}$ is the $i$-th diagonal element of the hat matrix
#
# $$H=X(X^{\;\prime}X)^{-1}X^{\;\prime}$$
#
# The influence of each point can be visualized by the criterion keyword argument. Options are Cook's distance and DFFITS, two measures of influence.
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.influence_plot(prestige_model, ax=ax, criterion="cooks")
# As you can see there are a few worrisome observations. Both contractor and reporter have low leverage but a large residual. <br />
# RR.engineer has small residual and large leverage. Conductor and minister have both high leverage and large residuals, and, <br />
# therefore, large influence.
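# For illustration, the two quantities behind the influence plot can be recomputed
# directly (a sketch assuming the OLSInfluence helper returned by get_influence()).
influence = prestige_model.get_influence()
infl_df = pd.DataFrame({"student_resid": influence.resid_studentized_external,
                        "leverage": influence.hat_matrix_diag},
                       index=prestige.index)
print(infl_df.sort_values("leverage", ascending=False).head())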
#### Partial Regression Plots
# Since we are doing multivariate regressions, we cannot just look at individual bivariate plots to discern relationships. <br />
# Instead, we want to look at the relationship of the dependent variable and independent variables conditional on the other <br />
# independent variables. We can do this through using partial regression plots, otherwise known as added variable plots. <br />
#
# In a partial regression plot, to discern the relationship between the response variable and the $k$-th variable, we compute <br />
# the residuals by regressing the response variable versus the independent variables excluding $X_k$. We can denote this by <br />
# $X_{\sim k}$. We then compute the residuals by regressing $X_k$ on $X_{\sim k}$. The partial regression plot is the plot <br />
# of the former versus the latter residuals. <br />
#
# The notable points of this plot are that the fitted line has slope $\beta_k$ and intercept zero. The residuals of this plot <br />
# are the same as those of the least squares fit of the original model with full $X$. You can discern the effects of the <br />
# individual data values on the estimation of a coefficient easily. If obs_labels is True, then these points are annotated <br />
# with their observation label. You can also see the violation of underlying assumptions such as homoscedasticity and <br />
# linearity.
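# As a sketch of the construction just described (not needed for the plots below),
# the partial regression for income can be computed by hand; the slope of the
# residual-on-residual fit matches the full-model coefficient on income.
y_resid = ols("prestige ~ education", data=prestige).fit().resid
x_resid = ols("income ~ education", data=prestige).fit().resid
partial = pd.DataFrame({"y_resid": y_resid, "x_resid": x_resid})
slope = ols("y_resid ~ x_resid", data=partial).fit().params["x_resid"]
print(slope, prestige_model.params["income"])  # the two values agree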
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("prestige", "income", ["income", "education"], data=prestige, ax=ax)
ax = fig.axes[0]
ax.set_xlim(-2e-15, 1e-14)
ax.set_ylim(-25, 30);
fix, ax = plt.subplots(figsize=(12,14))
fig = sm.graphics.plot_partregress("prestige", "income", ["education"], data=prestige, ax=ax)
# As you can see the partial regression plot confirms the influence of conductor, minister, and RR.engineer on the partial relationship between income and prestige. The cases greatly decrease the effect of income on prestige. Dropping these cases confirms this.
subset = ~prestige.index.isin(["conductor", "RR.engineer", "minister"])
prestige_model2 = ols("prestige ~ income + education", data=prestige, subset=subset).fit()
print(prestige_model2.summary())
# For a quick check of all the regressors, you can use plot_partregress_grid. These plots will not label the <br />
# points, but you can use them to identify problems and then use plot_partregress to get more information.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(prestige_model, fig=fig)
#### Component-Component plus Residual (CCPR) Plots
# The CCPR plot provides a way to judge the effect of one regressor on the <br />
# response variable by taking into account the effects of the other <br />
# independent variables. The partial residuals plot is defined as <br />
# $\text{Residuals} + B_iX_i$ versus $X_i$. The component adds $B_iX_i$ versus <br />
# $X_i$ to show where the fitted line would lie. Care should be taken if $X_i$ <br />
# is highly correlated with any of the other independent variables. If this <br />
# is the case, the variance evident in the plot will be an underestimate of <br />
# the true variance.
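# As a sketch of that definition (illustrative only), the partial residuals that
# plot_ccpr draws for education can be computed directly from the fitted model.
b_edu = prestige_model.params["education"]
partial_resid = prestige_model.resid + b_edu * prestige["education"]
print(partial_resid.head())  # plotted against prestige["education"] below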
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_ccpr(prestige_model, "education", ax=ax)
# As you can see the relationship between the variation in prestige explained by education conditional on income seems to be linear, though you can see there are some observations that are exerting considerable influence on the relationship. We can quickly look at more than one variable by using plot_ccpr_grid.
fig = plt.figure(figsize=(12, 8))
fig = sm.graphics.plot_ccpr_grid(prestige_model, fig=fig)
#### Regression Plots
# The plot_regress_exog function is a convenience function that gives a 2x2 plot containing the dependent variable and fitted values with confidence intervals vs. the independent variable chosen, the residuals of the model vs. the chosen independent variable, a partial regression plot, and a CCPR plot. This function can be used for quickly checking modeling assumptions with respect to a single regressor.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_regress_exog(prestige_model, "education", fig=fig)
#### Fit Plot
# The plot_fit function plots the fitted values versus a chosen independent variable. It includes prediction confidence intervals and optionally plots the true dependent variable.
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_fit(prestige_model, "education", ax=ax)
### Statewide Crime 2009 Dataset
# Compare the following to http://www.ats.ucla.edu/stat/stata/webbooks/reg/chapter4/statareg_self_assessment_answers4.htm
#
# Note that the data here are not the same as in that example; you could run that example by uncommenting the necessary cells below.
#dta = pd.read_csv("http://www.stat.ufl.edu/~aa/social/csv_files/statewide-crime-2.csv")
#dta = dta.set_index("State", inplace=True).dropna()
#dta.rename(columns={"VR" : "crime",
# "MR" : "murder",
# "M" : "pctmetro",
# "W" : "pctwhite",
# "H" : "pcths",
# "P" : "poverty",
# "S" : "single"
# }, inplace=True)
#
#crime_model = ols("murder ~ pctmetro + poverty + pcths + single", data=dta).fit()
dta = sm.datasets.statecrime.load_pandas().data
crime_model = ols("murder ~ urban + poverty + hs_grad + single", data=dta).fit()
print(crime_model.summary())
#### Partial Regression Plots
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(crime_model, fig=fig)
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("murder", "hs_grad", ["urban", "poverty", "single"], ax=ax, data=dta)
#### Leverage-Resid<sup>2</sup> Plot
# Closely related to the influence_plot is the leverage-resid<sup>2</sup> plot.
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.plot_leverage_resid2(crime_model, ax=ax)
#### Influence Plot
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.influence_plot(crime_model, ax=ax)
#### Using robust regression to correct for outliers.
# Part of the problem here in recreating the Stata results is that M-estimators are not robust to leverage points. MM-estimators should do better with this example.
from statsmodels.formula.api import rlm
rob_crime_model = rlm("murder ~ urban + poverty + hs_grad + single", data=dta,
M=sm.robust.norms.TukeyBiweight(3)).fit(conv="weights")
print(rob_crime_model.summary())
#rob_crime_model = rlm("murder ~ pctmetro + poverty + pcths + single", data=dta, M=sm.robust.norms.TukeyBiweight()).fit(conv="weights")
#print(rob_crime_model.summary())
# There are not yet influence diagnostics as part of RLM, but we can recreate them. (This depends on the status of [issue #888](https://github.com/statsmodels/statsmodels/issues/808))
weights = rob_crime_model.weights
idx = weights > 0
X = rob_crime_model.model.exog[idx]
ww = weights[idx] / weights[idx].mean()
hat_matrix_diag = ww*(X*np.linalg.pinv(X).T).sum(1)
resid = rob_crime_model.resid
resid2 = resid**2
resid2 /= resid2.sum()
nobs = int(idx.sum())
hm = hat_matrix_diag.mean()
rm = resid2.mean()
from statsmodels.graphics import utils
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(resid2[idx], hat_matrix_diag, 'o')
ax = utils.annotate_axes(range(nobs), labels=rob_crime_model.model.data.row_labels[idx],
points=lzip(resid2[idx], hat_matrix_diag), offset_points=[(-5,5)]*nobs,
size="large", ax=ax)
ax.set_xlabel("resid2")
ax.set_ylabel("leverage")
ylim = ax.get_ylim()
ax.vlines(rm, *ylim)
xlim = ax.get_xlim()
ax.hlines(hm, *xlim)
ax.margins(0,0)
|
bsd-3-clause
|
quentinl-c/network_testing
|
experimentation/exploitation/display_measurements.py
|
1
|
3098
|
import sys
import re
import csv
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, isdir, join
"""
=== UTILS ===
"""
TAIL_LEN_DELAY = len('_results_delays.csv')
TAIL_LEN_RATE = len('_results_loss-rate.csv')
SCALE = 22
def getCSVFiles(path):
regexp = re.compile(r'.*csv$')
files = [f for f in listdir(path) if isfile(join(path, f))]
return [f for f in files if re.search(regexp, f) is not None]
def drawDelays(path, name):
file = open(path, "r")
x = list()
y = list()
try:
reader = csv.reader(file)
for row in reader:
# print(row)
for elt in row[1:]:
x.append(int(row[0]))
y.append(int(elt))
plt.scatter(x, y, s=10)
plt.title('Delays for messages emitted by ' + name)
plt.xlabel('Messages rank')
plt.ylabel('Delays measured by reader')
plt.savefig('ScatterPlot.png')
plt.show()
finally:
file.close()
def drawBoxDelays(path, name):
file = open(path, "r")
data = []
try:
reader = csv.reader(file)
for row in reader:
print(reader.line_num)
if (reader.line_num % SCALE == 0):
l = []
for elt in row[1:]:
l.append(int(elt))
data.append(l)
plt.boxplot(data)
plt.title('Delays for messages emitted by ' + name)
plt.xlabel('Messages rank factor %s' % SCALE)
plt.ylabel('Delays measured by reader')
plt.savefig('ScatterPlot.png')
plt.show()
finally:
file.close()
def drawLossRate(path, name):
print("toto")
file = open(path, "r")
x = list()
y = list()
try:
reader = csv.reader(file)
for row in reader:
print(row)
x.append(int(row[0]))
y.append(float(row[1]))
plt.scatter(x, y, s=10, alpha=0.05)
plt.title('Loss rate for messages emitted by ' + name)
plt.xlabel('Messages rank')
plt.ylabel('Loss rate')
plt.savefig('ScatterPlot.png')
plt.show()
finally:
file.close()
if __name__ == '__main__':
if len(sys.argv) < 2:
print('usage : display_measurements.py path_to_sources_dir ')
sys.exit(1)
path_to_sources_dir = sys.argv[1]
if not isdir(path_to_sources_dir):
print('The path to sources dir given is not correct')
sys.exit(1)
files = getCSVFiles(path_to_sources_dir)
if len(files) < 2:
print('Not enough result files into the directory given')
sys.exit(1)
is_delay = re.compile(r'.*delays.csv$')
is_loss_rate = re.compile(r'.*loss-rate.csv$')
file_recorded = []
for f in files:
if re.search(is_delay, f) is not None:
drawDelays(join(path_to_sources_dir, f), f[:-TAIL_LEN_DELAY])
drawBoxDelays(join(path_to_sources_dir, f), f[:-TAIL_LEN_DELAY])
elif re.search(is_loss_rate, f) is not None:
drawLossRate(join(path_to_sources_dir, f), f[:-TAIL_LEN_RATE])
|
gpl-3.0
|
tomlof/scikit-learn
|
sklearn/feature_selection/__init__.py
|
140
|
1302
|
"""
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
from .from_model import SelectFromModel
from .mutual_info_ import mutual_info_regression, mutual_info_classif
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectFromModel',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression',
'mutual_info_classif',
'mutual_info_regression']
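# A minimal usage sketch of the univariate selectors exported above (illustrative
# only; guarded so nothing runs on import):
if __name__ == "__main__":
    import numpy as np
    X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 0, 1]])
    y = np.array([0, 1, 1, 0])
    # Keep the two features with the highest chi-squared scores.
    print(SelectKBest(chi2, k=2).fit_transform(X, y).shape)  # (4, 2)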
|
bsd-3-clause
|
binghongcha08/pyQMD
|
QMC/MC_exchange/permute4d/dissipation/2.0/traj.py
|
17
|
1290
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
#data = np.genfromtxt(fname='/home/bing/dissipation/energy.dat')
data = np.genfromtxt(fname='energy.dat')
fig, (ax1,ax2) = plt.subplots(ncols=1, nrows=2, sharex=True)
#font = {'family' : 'ubuntu',
# 'weight' : 'normal',
# 'size' : '16'}
#mpl.rc('font', **font) # pass in the font dict as kwargs
mpl.rcParams['font.size'] = 12
#mpl.rcParams['figure.figsize'] = 8,6
#pl.title('two-steps fitting alg')
ax1.set_ylabel('Energy [hartree]')
ax1.plot(data[:,0],data[:,2],'b--',linewidth=2,label='Potential')
#pl.plot(dat[:,0],dat[:,2],'r-',linewidth=2)
ax1.plot(data[:,0],data[:,3],'g-.',linewidth=2,label='Quantum Potential')
ax1.plot(data[:,0],data[:,4],'k-',linewidth=2,label='Energy')
#pl.legend(bbox_to_anchor=(0.5, 0.38, 0.42, .302), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#ax1.set_yticks((0.4,0.6,0.8))
ax1.legend(loc=0)
ax1.set_ylim(0,5)
ax2.set_xlabel('time [a.u.]')
ax2.set_ylabel('Energy [hartree]')
ax2.plot(data[:,0],data[:,1],'r--',linewidth=2,label='$Kinetic$')
#pl.plot(dat[:,0],dat[:,1],'k-',linewidth=2)
ax2.set_yscale('log')
#ax2.set_xticks((0,4,8))
#ax2.set_yticks((1e-7,1e-5,1e-3))
plt.legend(loc=0)
plt.subplots_adjust(hspace=0.)
plt.show()
|
gpl-3.0
|
vseledkin/dl4mt-material
|
session2/nmt.py
|
5
|
39416
|
'''
Build a simple neural machine translation model
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import numpy
import copy
import os
import warnings
import sys
import time
from scipy import optimize, stats
from collections import OrderedDict
from sklearn.cross_validation import KFold
from data_iterator import TextIterator
profile = False
# push parameters to Theano shared variables
def zipp(params, tparams):
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
# pull parameters from Theano shared variables
def unzip(zipped):
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
return [vv for kk, vv in tparams.iteritems()]
# dropout
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
state_before * trng.binomial(state_before.shape, p=0.5, n=1, dtype=state_before.dtype),
state_before * 0.5)
return proj
# make prefix-appended name
def _p(pp, name):
return '%s_%s'%(pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
# load parameters
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
warnings.warn('%s is not in the archive'%kk)
continue
params[kk] = pp[kk]
return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
'gru': ('param_init_gru', 'gru_layer'),
'gru_cond': ('param_init_gru_cond', 'gru_cond_layer'),
}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
# some utilities
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin,nout=None, scale=0.01, ortho=True):
if nout == None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
def tanh(x):
return tensor.tanh(x)
def linear(x):
return x
def concatenate(tensor_list, axis=0):
"""
Alternative implementation of `theano.tensor.concatenate`.
This function does exactly the same thing, but contrary to Theano's own
implementation, the gradient is implemented on the GPU.
Backpropagating through `theano.tensor.concatenate` yields slowdowns
because the inverse operation (splitting) needs to be done on the CPU.
This implementation does not have that problem.
:usage:
>>> x, y = theano.tensor.matrices('x', 'y')
>>> c = concatenate([x, y], axis=1)
:parameters:
- tensor_list : list
list of Theano tensor expressions that should be concatenated.
- axis : int
the tensors will be joined along this axis.
:returns:
- out : tensor
the concatenated tensor expression.
"""
concat_size = sum(tt.shape[axis] for tt in tensor_list)
output_shape = ()
for k in range(axis):
output_shape += (tensor_list[0].shape[k],)
output_shape += (concat_size,)
for k in range(axis + 1, tensor_list[0].ndim):
output_shape += (tensor_list[0].shape[k],)
out = tensor.zeros(output_shape)
offset = 0
for tt in tensor_list:
indices = ()
for k in range(axis):
indices += (slice(None),)
indices += (slice(offset, offset + tt.shape[axis]),)
for k in range(axis + 1, tensor_list[0].ndim):
indices += (slice(None),)
out = tensor.set_subtensor(out[indices], tt)
offset += tt.shape[axis]
return out
# batch preparation
def prepare_data(seqs_x, seqs_y, maxlen=None, n_words_src=30000, n_words=30000):
# x: a list of sentences
lengths_x = [len(s) for s in seqs_x]
lengths_y = [len(s) for s in seqs_y]
if maxlen != None:
new_seqs_x = []
new_seqs_y = []
new_lengths_x = []
new_lengths_y = []
for l_x, s_x, l_y, s_y in zip(lengths_x, seqs_x, lengths_y, seqs_y):
if l_x < maxlen and l_y < maxlen:
new_seqs_x.append(s_x)
new_lengths_x.append(l_x)
new_seqs_y.append(s_y)
new_lengths_y.append(l_y)
lengths_x = new_lengths_x
seqs_x = new_seqs_x
lengths_y = new_lengths_y
seqs_y = new_seqs_y
if len(lengths_x) < 1 or len(lengths_y) < 1:
return None, None, None, None
n_samples = len(seqs_x)
maxlen_x = numpy.max(lengths_x) + 1
maxlen_y = numpy.max(lengths_y) + 1
x = numpy.zeros((maxlen_x, n_samples)).astype('int64')
y = numpy.zeros((maxlen_y, n_samples)).astype('int64')
x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')
y_mask = numpy.zeros((maxlen_y, n_samples)).astype('float32')
for idx, [s_x, s_y] in enumerate(zip(seqs_x,seqs_y)):
x[:lengths_x[idx],idx] = s_x
x_mask[:lengths_x[idx]+1,idx] = 1.
y[:lengths_y[idx],idx] = s_y
y_mask[:lengths_y[idx]+1,idx] = 1.
return x, x_mask, y, y_mask
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None, ortho=True):
if nin == None:
nin = options['dim_proj']
if nout == None:
nout = options['dim_proj']
params[_p(prefix,'W')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
params[_p(prefix,'b')] = numpy.zeros((nout,)).astype('float32')
return params
def fflayer(tparams, state_below, options, prefix='rconv', activ='lambda x: tensor.tanh(x)', **kwargs):
return eval(activ)(tensor.dot(state_below, tparams[_p(prefix,'W')])+tparams[_p(prefix,'b')])
# GRU layer
def param_init_gru(options, params, prefix='gru', nin=None, dim=None, hiero=False):
if nin == None:
nin = options['dim_proj']
if dim == None:
dim = options['dim_proj']
if not hiero:
W = numpy.concatenate([norm_weight(nin,dim),
norm_weight(nin,dim)], axis=1)
params[_p(prefix,'W')] = W
params[_p(prefix,'b')] = numpy.zeros((2 * dim,)).astype('float32')
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix,'U')] = U
Wx = norm_weight(nin, dim)
params[_p(prefix,'Wx')] = Wx
Ux = ortho_weight(dim)
params[_p(prefix,'Ux')] = Ux
params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')
return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None, **kwargs):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
dim = tparams[_p(prefix,'Ux')].shape[1]
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
U = tparams[_p(prefix, 'U')]
Ux = tparams[_p(prefix, 'Ux')]
def _step_slice(m_, x_, xx_, h_, U, Ux):
preact = tensor.dot(h_, U)
preact += x_
r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
preactx = tensor.dot(h_, Ux)
preactx = preactx * r
preactx = preactx + xx_
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h#, r, u, preact, preactx
seqs = [mask, state_below_, state_belowx]
_step = _step_slice
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [tensor.alloc(0., n_samples, dim)],
#None, None, None, None],
non_sequences = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Ux')]],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
rval = [rval]
return rval
# Conditional GRU layer with Attention
def param_init_gru_cond(options, params, prefix='gru_cond',
nin=None, dim=None, dimctx=None):
if nin == None:
nin = options['dim']
if dim == None:
dim = options['dim']
if dimctx == None:
dimctx = options['dim']
params = param_init_gru(options, params, prefix, nin=nin, dim=dim)
# context to LSTM
Wc = norm_weight(dimctx,dim*2)
params[_p(prefix,'Wc')] = Wc
Wcx = norm_weight(dimctx,dim)
params[_p(prefix,'Wcx')] = Wcx
# attention: prev -> hidden
Wi_att = norm_weight(nin,dimctx)
params[_p(prefix,'Wi_att')] = Wi_att
# attention: context -> hidden
Wc_att = norm_weight(dimctx)
params[_p(prefix,'Wc_att')] = Wc_att
# attention: LSTM -> hidden
Wd_att = norm_weight(dim,dimctx)
params[_p(prefix,'Wd_att')] = Wd_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix,'b_att')] = b_att
# attention:
U_att = norm_weight(dimctx,1)
params[_p(prefix,'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
return params
def gru_cond_layer(tparams, state_below, options, prefix='gru',
mask=None, context=None, one_step=False,
init_memory=None, init_state=None,
context_mask=None,
**kwargs):
assert context, 'Context must be provided'
if one_step:
assert init_state, 'previous state must be provided'
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
# mask
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
dim = tparams[_p(prefix, 'Wcx')].shape[1]
# initial/previous state
if init_state == None:
init_state = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 3, 'Context must be 3-d: #annotation x #sample x dim'
pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix,'b_att')]
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
# projected x
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowc = tensor.dot(state_below, tparams[_p(prefix, 'Wi_att')])
def _step_slice(m_, x_, xx_, xc_, h_, ctx_, alpha_, pctx_, cc_,
U, Wc, Wd_att, U_att, c_tt, Ux, Wcx):
# attention
pstate_ = tensor.dot(h_, Wd_att)
pctx__ = pctx_ + pstate_[None,:,:]
pctx__ += xc_
pctx__ = tensor.tanh(pctx__)
alpha = tensor.dot(pctx__, U_att)+c_tt
alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
alpha = tensor.exp(alpha)
if context_mask:
alpha = alpha * context_mask
alpha = alpha / alpha.sum(0, keepdims=True)
ctx_ = (cc_ * alpha[:,:,None]).sum(0) # current context
preact = tensor.dot(h_, U)
preact += x_
preact += tensor.dot(ctx_, Wc)
preact = tensor.nnet.sigmoid(preact)
r = _slice(preact, 0, dim)
u = _slice(preact, 1, dim)
preactx = tensor.dot(h_, Ux)
preactx *= r
preactx += xx_
preactx += tensor.dot(ctx_, Wcx)
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h, ctx_, alpha.T #, pstate_, preact, preactx, r, u
seqs = [mask, state_below_, state_belowx, state_belowc]
_step = _step_slice
shared_vars = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Wc')],
tparams[_p(prefix,'Wd_att')],
tparams[_p(prefix,'U_att')],
tparams[_p(prefix, 'c_tt')],
tparams[_p(prefix, 'Ux')],
tparams[_p(prefix, 'Wcx')]]
if one_step:
rval = _step(*(seqs+[init_state, None, None, pctx_, context]+shared_vars))
else:
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [init_state,
tensor.alloc(0., n_samples, context.shape[2]),
tensor.alloc(0., n_samples, context.shape[0])],
non_sequences=[pctx_,
context]+shared_vars,
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
return rval
# initialize all parameters
def init_params(options):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
# encoder: bidirectional RNN
params = get_layer(options['encoder'])[0](options, params, prefix='encoder',
nin=options['dim_word'],
dim=options['dim'])
params = get_layer(options['encoder'])[0](options, params, prefix='encoder_r',
nin=options['dim_word'],
dim=options['dim'])
ctxdim = 2 * options['dim']
# init_state, init_cell
params = get_layer('ff')[0](options, params, prefix='ff_state',
nin=ctxdim, nout=options['dim'])
# decoder
params = get_layer(options['decoder'])[0](options, params, prefix='decoder',
nin=options['dim_word'],
dim=options['dim'],
dimctx=ctxdim)
# readout
params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm',
nin=options['dim'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_prev',
nin=options['dim_word'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx',
nin=ctxdim, nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit',
nin=options['dim_word'], nout=options['n_words'])
return params
# build a training model
def build_model(tparams, options):
opt_ret = dict()
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# description string: #words x #samples
x = tensor.matrix('x', dtype='int64')
x_mask = tensor.matrix('x_mask', dtype='float32')
y = tensor.matrix('y', dtype='int64')
y_mask = tensor.matrix('y_mask', dtype='float32')
xr = x[::-1]
xr_mask = x_mask[::-1]
n_timesteps = x.shape[0]
n_timesteps_trg = y.shape[0]
n_samples = x.shape[1]
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
proj = get_layer(options['encoder'])[1](tparams, emb, options,
prefix='encoder',
mask=x_mask)
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
projr = get_layer(options['encoder'])[1](tparams, embr, options,
prefix='encoder_r',
mask=xr_mask)
ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim-1)
ctx_mean = (ctx * x_mask[:,:,None]).sum(0) / x_mask.sum(0)[:,None]
#ctx_mean = concatenate([proj[0][-1], projr[0][-1]], axis=proj[0].ndim-2)
# initial decoder state
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
# word embedding (target)
emb = tparams['Wemb_dec'][y.flatten()]
emb = emb.reshape([n_timesteps_trg, n_samples, options['dim_word']])
emb_shifted = tensor.zeros_like(emb)
emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
emb = emb_shifted
# decoder
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=y_mask, context=ctx,
context_mask=x_mask,
one_step=False,
init_state=init_state)
proj_h = proj[0]
ctxs = proj[1]
opt_ret['dec_alphas'] = proj[2]
# compute word probabilities
logit_lstm = get_layer('ff')[1](tparams, proj_h, options,
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options,
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
logit = get_layer('ff')[1](tparams, logit, options,
prefix='ff_logit', activ='linear')
logit_shp = logit.shape
probs = tensor.nnet.softmax(logit.reshape([logit_shp[0]*logit_shp[1],
logit_shp[2]]))
# cost
y_flat = y.flatten()
y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
cost = -tensor.log(probs.flatten()[y_flat_idx])
cost = cost.reshape([y.shape[0],y.shape[1]])
cost = (cost * y_mask).sum(0)
return trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost
# build a sampler
def build_sampler(tparams, options, trng):
x = tensor.matrix('x', dtype='int64')
xr = x[::-1]
n_timesteps = x.shape[0]
n_samples = x.shape[1]
# word embedding (source)
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
# encoder
proj = get_layer(options['encoder'])[1](tparams, emb, options, prefix='encoder')
projr = get_layer(options['encoder'])[1](tparams, embr, options, prefix='encoder_r')
ctx = concatenate([proj[0],projr[0][::-1]], axis=proj[0].ndim-1)
ctx_mean = ctx.mean(0)
#ctx_mean = concatenate([proj[0][-1],projr[0][-1]], axis=proj[0].ndim-2)
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
print 'Building f_init...',
outs = [init_state, ctx]
f_init = theano.function([x], outs, name='f_init', profile=profile)
print 'Done'
# x: 1 x 1
y = tensor.vector('y_sampler', dtype='int64')
init_state = tensor.matrix('init_state', dtype='float32')
# if it's the first word, emb should be all zero
emb = tensor.switch(y[:,None] < 0,
tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
tparams['Wemb_dec'][y])
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=None, context=ctx,
one_step=True,
init_state=init_state)
next_state = proj[0]
ctxs = proj[1]
logit_lstm = get_layer('ff')[1](tparams, next_state, options,
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options,
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
logit = get_layer('ff')[1](tparams, logit, options,
prefix='ff_logit', activ='linear')
next_probs = tensor.nnet.softmax(logit)
next_sample = trng.multinomial(pvals=next_probs).argmax(1)
# next word probability
print 'Building f_next..',
inps = [y, ctx, init_state]
outs = [next_probs, next_sample, next_state]
f_next = theano.function(inps, outs, name='f_next', profile=profile)
print 'Done'
return f_init, f_next
# generate sample
def gen_sample(tparams, f_init, f_next, x, options, trng=None, k=1, maxlen=30,
stochastic=True, argmax=False):
if k > 1:
assert not stochastic, 'Beam search does not support stochastic sampling'
sample = []
sample_score = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
ret = f_init(x)
next_state, ctx0 = ret[0], ret[1]
next_w = -1 * numpy.ones((1,)).astype('int64')
for ii in xrange(maxlen):
ctx = numpy.tile(ctx0, [live_k, 1])
inps = [next_w, ctx, next_state]
ret = f_next(*inps)
next_p, next_w, next_state = ret[0], ret[1], ret[2]
if stochastic:
if argmax:
nw = next_p[0].argmax()
else:
nw = next_w[0]
sample.append(nw)
sample_score += next_p[0,nw]
if nw == 0:
break
else:
cand_scores = hyp_scores[:,None] - numpy.log(next_p)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k-dead_k)]
voc_size = next_p.shape[1]
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
new_hyp_states = []
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[ti])
new_hyp_states.append(copy.copy(next_state[ti]))
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_states.append(new_hyp_states[idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = numpy.array(hyp_states)
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True):
probs = []
n_done = 0
for x, y in iterator:
n_done += len(x)
x, x_mask, y, y_mask = prepare_data(x, y,
n_words_src=options['n_words_src'],
n_words=options['n_words'])
pprobs = f_log_probs(x,x_mask,y,y_mask)
for pp in pprobs:
probs.append(pp)
if numpy.isnan(numpy.mean(probs)):
import ipdb; ipdb.set_trace()
if verbose:
print >>sys.stderr, '%d samples computed'%(n_done)
return numpy.array(probs)
# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
def adam(lr, tparams, grads, inp, cost):
gshared = [theano.shared(p.get_value() * 0.,
name='%s_grad'%k)
for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function(inp, cost, updates=gsup, profile=profile)
lr0 = 0.0002
b1 = 0.1
b2 = 0.001
e = 1e-8
updates = []
i = theano.shared(numpy.float32(0.))
i_t = i + 1.
fix1 = 1. - b1**(i_t)
fix2 = 1. - b2**(i_t)
lr_t = lr0 * (tensor.sqrt(fix2) / fix1)
for p, g in zip(tparams.values(), gshared):
m = theano.shared(p.get_value() * 0.)
v = theano.shared(p.get_value() * 0.)
m_t = (b1 * g) + ((1. - b1) * m)
v_t = (b2 * tensor.sqr(g)) + ((1. - b2) * v)
g_t = m_t / (tensor.sqrt(v_t) + e)
p_t = p - (lr_t * g_t)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
updates.append((i, i_t))
f_update = theano.function([lr], [], updates=updates,
on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_grad'%k)
for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rup2'%k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad2'%k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rg2up, profile=profile)
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
f_update = theano.function([lr], [], updates=ru2up+param_up, on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_grad'%k) for k, p in tparams.iteritems()]
running_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad'%k) for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2'%k) for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rgup+rg2up, profile=profile)
updir = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_updir'%k) for k, p in tparams.iteritems()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4)) for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads, running_grads2)]
param_up = [(p, p + udn[1]) for p, udn in zip(itemlist(tparams), updir_new)]
f_update = theano.function([lr], [], updates=updir_new+param_up, on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def sgd(lr, tparams, grads, x, mask, y, cost):
gshared = [theano.shared(p.get_value() * 0., name='%s_grad'%k) for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function([x, mask, y], cost, updates=gsup, profile=profile)
pup = [(p, p - lr * g) for p, g in zip(itemlist(tparams), gshared)]
f_update = theano.function([lr], [], updates=pup, profile=profile)
return f_grad_shared, f_update
def train(dim_word=100, # word vector dimensionality
dim=1000, # the number of LSTM units
encoder='gru',
decoder='gru_cond',
patience=10,
max_epochs=5000,
dispFreq=100,
decay_c=0.,
alpha_c=0.,
diag_c=0.,
clip_c=-1.,
lrate=0.01,
n_words_src=100000,
n_words=100000,
maxlen=100, # maximum length of the description
optimizer='rmsprop',
batch_size = 16,
valid_batch_size = 16,
saveto='model.npz',
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
sampleFreq=100, # generate some samples after every sampleFreq updates
datasets=['/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok',
'/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok'],
valid_datasets=['../data/dev/newstest2011.en.tok', '../data/dev/newstest2011.fr.tok'],
dictionaries=['/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok.pkl',
'/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok.pkl'],
use_dropout=False,
reload_=False):
# Model options
model_options = locals().copy()
worddicts = [None] * len(dictionaries)
worddicts_r = [None] * len(dictionaries)
for ii, dd in enumerate(dictionaries):
with open(dd, 'rb') as f:
worddicts[ii] = pkl.load(f)
worddicts_r[ii] = dict()
for kk, vv in worddicts[ii].iteritems():
worddicts_r[ii][vv] = kk
# reload options
if reload_ and os.path.exists(saveto):
with open('%s.pkl'%saveto, 'rb') as f:
model_options = pkl.load(f)
print 'Loading data'
train = TextIterator(datasets[0], datasets[1],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=batch_size,
maxlen=maxlen)
valid = TextIterator(valid_datasets[0], valid_datasets[1],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
maxlen=maxlen)
print 'Building model'
params = init_params(model_options)
# reload parameters
if reload_ and os.path.exists(saveto):
params = load_params(saveto, params)
tparams = init_tparams(params)
trng, use_noise, \
x, x_mask, y, y_mask, \
opt_ret, \
cost = \
build_model(tparams, model_options)
inps = [x, x_mask, y, y_mask]
print 'Building sampler'
f_init, f_next = build_sampler(tparams, model_options, trng)
# before any regularizer
print 'Building f_log_probs...',
f_log_probs = theano.function(inps, cost, profile=profile)
print 'Done'
cost = cost.mean()
if decay_c > 0.:
decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
weight_decay = 0.
for kk, vv in tparams.iteritems():
weight_decay += (vv ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
alpha_reg = alpha_c * ((tensor.cast(y_mask.sum(0)//x_mask.sum(0), 'float32')[:,None]-
opt_ret['dec_alphas'].sum(0))**2).sum(1).mean()
cost += alpha_reg
# after any regularizer
print 'Building f_cost...',
f_cost = theano.function(inps, cost, profile=profile)
print 'Done'
print 'Computing gradient...',
grads = tensor.grad(cost, wrt=itemlist(tparams))
print 'Done'
print 'Building f_grad...',
f_grad = theano.function(inps, grads, profile=profile)
print 'Done'
if clip_c > 0.:
g2 = 0.
for g in grads:
g2 += (g**2).sum()
new_grads = []
for g in grads:
new_grads.append(tensor.switch(g2 > (clip_c**2),
g / tensor.sqrt(g2) * clip_c,
g))
grads = new_grads
lr = tensor.scalar(name='lr')
print 'Building optimizers...',
f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
print 'Done'
print 'Optimization'
history_errs = []
# reload history
if reload_ and os.path.exists(saveto):
history_errs = list(numpy.load(saveto)['history_errs'])
best_p = None
bad_count = 0
if validFreq == -1:
validFreq = len(train[0])/batch_size
if saveFreq == -1:
saveFreq = len(train[0])/batch_size
if sampleFreq == -1:
sampleFreq = len(train[0])/batch_size
uidx = 0
estop = False
for eidx in xrange(max_epochs):
n_samples = 0
for x, y in train:
n_samples += len(x)
uidx += 1
use_noise.set_value(1.)
x, x_mask, y, y_mask = prepare_data(x, y, maxlen=maxlen,
n_words_src=n_words_src,
n_words=n_words)
if x is None:
print 'Minibatch with zero sample under length ', maxlen
uidx -= 1
continue
ud_start = time.time()
cost = f_grad_shared(x, x_mask, y, y_mask)
f_update(lrate)
ud = time.time() - ud_start
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'UD ', ud
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving...',
#import ipdb; ipdb.set_trace()
if best_p != None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pkl.dump(model_options, open('%s.pkl'%saveto, 'wb'))
print 'Done'
if numpy.mod(uidx, sampleFreq) == 0:
# FIXME: random selection?
for jj in xrange(numpy.minimum(5,x.shape[1])):
stochastic = True
sample, score = gen_sample(tparams, f_init, f_next, x[:,jj][:,None],
model_options, trng=trng, k=1, maxlen=30,
stochastic=stochastic, argmax=False)
print 'Source ',jj,': ',
for vv in x[:,jj]:
if vv == 0:
break
if vv in worddicts_r[0]:
print worddicts_r[0][vv],
else:
print 'UNK',
print
print 'Truth ',jj,' : ',
for vv in y[:,jj]:
if vv == 0:
break
if vv in worddicts_r[1]:
print worddicts_r[1][vv],
else:
print 'UNK',
print
print 'Sample ', jj, ': ',
if stochastic:
ss = sample
else:
score = score / numpy.array([len(s) for s in sample])
ss = sample[score.argmin()]
for vv in ss:
if vv == 0:
break
if vv in worddicts_r[1]:
print worddicts_r[1][vv],
else:
print 'UNK',
print
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
valid_errs = pred_probs(f_log_probs, prepare_data, model_options, valid)
valid_err = valid_errs.mean()
history_errs.append(valid_err)
if uidx == 0 or valid_err <= numpy.array(history_errs).min():
best_p = unzip(tparams)
bad_counter = 0
if len(history_errs) > patience and valid_err >= numpy.array(history_errs)[:-patience].min():
bad_counter += 1
if bad_counter > patience:
print 'Early Stop!'
estop = True
break
if numpy.isnan(valid_err):
import ipdb; ipdb.set_trace()
print 'Valid ', valid_err
print 'Seen %d samples'%n_samples
if estop:
break
if best_p is not None:
zipp(best_p, tparams)
use_noise.set_value(0.)
valid_err = pred_probs(f_log_probs, prepare_data, model_options, valid).mean()
print 'Valid ', valid_err
params = copy.copy(best_p)
numpy.savez(saveto, zipped_params=best_p,
history_errs=history_errs,
**params)
return valid_err
if __name__ == '__main__':
pass
|
bsd-3-clause
|
amueller/scipy_2015_sklearn_tutorial
|
notebooks/figures/plot_kneighbors_regularization.py
|
25
|
1363
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
def make_dataset(n_samples=100):
rnd = np.random.RandomState(42)
x = np.linspace(-3, 3, n_samples)
y_no_noise = np.sin(4 * x) + x
y = y_no_noise + rnd.normal(size=len(x))
return x, y
def plot_regression_datasets():
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
for n_samples, ax in zip([10, 100, 1000], axes):
x, y = make_dataset(n_samples)
ax.plot(x, y, 'o', alpha=.6)
def plot_kneighbors_regularization():
rnd = np.random.RandomState(42)
x = np.linspace(-3, 3, 100)
y_no_noise = np.sin(4 * x) + x
y = y_no_noise + rnd.normal(size=len(x))
X = x[:, np.newaxis]
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
x_test = np.linspace(-3, 3, 1000)
for n_neighbors, ax in zip([2, 5, 20], axes.ravel()):
kneighbor_regression = KNeighborsRegressor(n_neighbors=n_neighbors)
kneighbor_regression.fit(X, y)
ax.plot(x, y_no_noise, label="true function")
ax.plot(x, y, "o", label="data")
ax.plot(x_test, kneighbor_regression.predict(x_test[:, np.newaxis]),
label="prediction")
ax.legend()
ax.set_title("n_neighbors = %d" % n_neighbors)
if __name__ == "__main__":
plot_kneighbors_regularization()
plt.show()
|
cc0-1.0
|
asazo/ANN
|
tarea1/top_level_features.py
|
1
|
5128
|
from scipy.ndimage import uniform_filter
from scipy.misc import imread
import cPickle as pickle
import matplotlib
import numpy as np
import os
#The next functions were slightly modified from the work of Alex Krizhevsky
#http://www.cs.toronto.edu/~kriz/cifar.html
def extract_features(array_imgs, feature_fns, verbose=False):
"""
Given pixel data for images and several feature functions that can operate on
single images, apply all feature functions to all images, concatenating the
feature vectors for each image and storing the features for all images in
a single matrix.
Inputs:
- array_imgs: N array of pixel data for N images.
- feature_fns: List of k feature functions. The ith feature function should
take as input an H x W x D array and return a (one-dimensional) array of
length F_i. For CIFAR10, H=32, W=32, D=3
- verbose: Boolean; if true, print progress.
Returns:
An array of shape (N, F_1 + ... + F_k) where each column is the concatenation
of all features for a single image.
"""
imgs = array_imgs.reshape(array_imgs.shape[0], 3, 32, 32).transpose(0,2,3,1).astype("float")
print imgs.shape
num_images = imgs.shape[0]
if num_images == 0:
return np.array([])
# Use the first image to determine feature dimensions
feature_dims = []
first_image_features = []
for feature_fn in feature_fns:
feats = feature_fn(imgs[0].squeeze())
assert len(feats.shape) == 1, 'Feature functions must be one-dimensional'
feature_dims.append(feats.size)
first_image_features.append(feats)
# Now that we know the dimensions of the features, we can allocate a single
# big array to store all features as columns.
total_feature_dim = sum(feature_dims)
imgs_features = np.zeros((num_images, total_feature_dim))
imgs_features[0] = np.hstack(first_image_features).T
# Extract features for the rest of the images.
for i in xrange(1, num_images):
idx = 0
for feature_fn, feature_dim in zip(feature_fns, feature_dims):
next_idx = idx + feature_dim
imgs_features[i, idx:next_idx] = feature_fn(imgs[i].squeeze())
idx = next_idx
if verbose and i % 1000 == 0:
print 'Done extracting features for %d / %d images' % (i, num_images)
return imgs_features
def rgb2gray(rgb):
"""Convert RGB image to grayscale
Parameters:
rgb : RGB image
Returns:
gray : grayscale image
"""
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def hog_features(im):
"""Compute Histogram of Gradient (HOG) feature for an image
Modified from skimage.feature.hog
http://pydoc.net/Python/scikits-image/0.4.2/skimage.feature.hog
Reference:
Histograms of Oriented Gradients for Human Detection
Navneet Dalal and Bill Triggs, CVPR 2005
Parameters:
im : an input grayscale or rgb image
Returns:
feat: Histogram of Gradient (HOG) feature
"""
# convert rgb to grayscale if needed
if im.ndim == 3:
image = rgb2gray(im)
else:
image = np.atleast_2d(im)
sx, sy = image.shape # image size
orientations = 9 # number of gradient bins
cx, cy = (8, 8) # pixels per cell
gx = np.zeros(image.shape)
gy = np.zeros(image.shape)
gx[:, :-1] = np.diff(image, n=1, axis=1) # compute gradient on x-direction
gy[:-1, :] = np.diff(image, n=1, axis=0) # compute gradient on y-direction
grad_mag = np.sqrt(gx ** 2 + gy ** 2) # gradient magnitude
grad_ori = np.arctan2(gy, (gx + 1e-15)) * (180 / np.pi) + 90 # gradient orientation
n_cellsx = int(np.floor(sx / cx)) # number of cells in x
n_cellsy = int(np.floor(sy / cy)) # number of cells in y
# compute orientations integral images
orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))
for i in range(orientations):
# create new integral image for this orientation
# isolate orientations in this range
temp_ori = np.where(grad_ori < 180 / orientations * (i + 1),
grad_ori, 0)
temp_ori = np.where(grad_ori >= 180 / orientations * i,
temp_ori, 0)
# select magnitudes for those orientations
cond2 = temp_ori > 0
temp_mag = np.where(cond2, grad_mag, 0)
orientation_histogram[:,:,i] = uniform_filter(temp_mag, size=(cx, cy))[cx/2::cx, cy/2::cy].T
return orientation_histogram.ravel()
def color_histogram_hsv(im, nbin=10, xmin=0, xmax=255, normalized=True):
"""
Compute color histogram for an image using hue.
Inputs:
- im: H x W x C array of pixel data for an RGB image.
- nbin: Number of histogram bins. (default: 10)
- xmin: Minimum pixel value (default: 0)
- xmax: Maximum pixel value (default: 255)
- normalized: Whether to normalize the histogram (default: True)
Returns:
1D vector of length nbin giving the color histogram over the hue of the
input image.
"""
ndim = im.ndim
bins = np.linspace(xmin, xmax, nbin+1)
hsv = matplotlib.colors.rgb_to_hsv(im/xmax) * xmax
imhist, bin_edges = np.histogram(hsv[:,:,0], bins=bins, density=normalized)
imhist = imhist * np.diff(bin_edges)
# return histogram
return imhist
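# A minimal usage sketch (illustrative only, assuming CIFAR-10 style input of
# shape (N, 3072)): combine the feature functions above via extract_features.
def _demo_extract_features():
    rng = np.random.RandomState(0)
    fake_imgs = rng.randint(0, 256, size=(5, 3072)).astype(np.uint8)
    feats = extract_features(fake_imgs, [hog_features, color_histogram_hsv])
    # One row per image: HOG features followed by the 10-bin hue histogram.
    print feats.shape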
|
mit
|
khs26/pele
|
pele/transition_states/tests/test_transition_state_search.py
|
5
|
5273
|
import unittest
from itertools import izip
import numpy as np
import matplotlib.pyplot as plt
from pele.transition_states import FindTransitionState, findTransitionState
from pele.potentials import BasePotential
from pele.potentials.tests import _base_test
from pele.potentials.tests._base_test import assert_arrays_almost_equal
class SimpleTSPot(BasePotential):
nfev = 0
# def __init__(self):
# self.nfcalls = 0
def getEnergyGradient(self, x):
self.nfev += 1
grad = np.zeros(x.size)
dx = x.copy()
dx[0] += 1
e1 = -np.exp(-np.dot(dx, dx))
grad -= 2 * dx * e1
dx = x.copy()
dx[0] -= 1
e2 = -np.exp(-np.dot(dx, dx))
grad -= 2 * dx * e2
return e1 + e2, grad
def getEnergy(self, x):
return self.getEnergyGradient(x)[0]
class HarmonicPot(BasePotential):
def getEnergy(self, x):
return np.dot(x, x) + x.sum()
def getEnergyGradient(self, x):
e = self.getEnergy(x)
return e, 2 * x + 1
def plot_pot():
x, y = np.meshgrid(np.arange(-2, 2, .1), np.arange(-2, 2, .1))
pot = SimpleTSPot()
energies = [pot.getEnergy(np.array([xi, yi])) for xi, yi in izip(x.reshape(-1), y.reshape(-1))]
energies = np.array(energies).reshape(x.shape)
plt.contourf(x, y, energies)
plt.show()
class TestSimpleTSPot(_base_test._TestConfiguration):
def setUp(self):
self.pot = SimpleTSPot()
self.x0 = np.array([.1, 1])
self.e0 = -0.27335478531821539
class TestHarmonicPot(_base_test._TestConfiguration):
def setUp(self):
self.pot = HarmonicPot()
self.x0 = np.array([.1, 1])
self.e0 = 2.11
def print_event(coords=None, **kwargs):
print "coords", coords
class TestFindTransitionStateSimplePot(unittest.TestCase):
def setUp(self):
self.pot = SimpleTSPot()
self.x0 = np.array([.1, .1])
self.xts = np.zeros(self.x0.size)
self.ets = self.pot.getEnergy(self.xts)
def test1(self):
# plot_pot()
opt = FindTransitionState(self.x0, self.pot, orthogZeroEigs=None,
# iprint=1,
# verbosity=10, event=print_event,
# tol=1e-3,
# lowestEigenvectorQuenchParams=dict(iprint=1, events=[print_event])
)
ret = opt.run()
self.assertTrue(ret.success)
assert_arrays_almost_equal(self, ret.coords, self.xts, places=3)
self.assertAlmostEqual(ret.energy, self.ets, delta=1e-3)
self.assertLess(ret.rms, 1e-3)
self.assertEqual(ret.nfev + 1, self.pot.nfev)
def test_wrapper(self):
ret = findTransitionState(self.x0, self.pot, orthogZeroEigs=None)
self.assertTrue(ret.success)
assert_arrays_almost_equal(self, ret.coords, self.xts, places=3)
def test_2(self):
self.called = False
def event(**kwargs):
self.called = True
opt = FindTransitionState(self.x0, self.pot, orthogZeroEigs=None,
tangentSpaceQuenchParams=dict(maxstep=1.),
event=event)
ret = opt.run()
self.assertTrue(ret.success)
self.assertTrue(self.called)
def test_from_near_minimum(self):
print "\n\nstarting from a minimum"
x0 = np.array([.6, .1])
opt = FindTransitionState(x0, self.pot, orthogZeroEigs=None,
iprint=1,
verbosity=10, # event=print_event,
# tol=1e-3,
# lowestEigenvectorQuenchParams=dict(iprint=1, events=[print_event])
)
ret = opt.run()
print ret
self.assertTrue(ret.success)
assert_arrays_almost_equal(self, ret.coords, self.xts, places=3)
def test_from_near_minimum_demand_negative_eigenvalue(self):
print "\n\nstarting from a minimum demand"
# demand that the eigenvalue is negative initially.
# this should fail right away
x0 = np.array([.6, .1])
opt = FindTransitionState(x0, self.pot, orthogZeroEigs=None,
demand_initial_negative_vec=True,
iprint=1,
verbosity=10, # event=print_event,
# tol=1e-3,
# lowestEigenvectorQuenchParams=dict(iprint=1, events=[print_event])
)
ret = opt.run()
print ret
self.assertFalse(ret.success)
self.assertEqual(ret.nsteps, 0)
class TestFindTS_BadPotential(unittest.TestCase):
def test1(self):
print "\n\ntesting find ts with harmonic potential"
pot = HarmonicPot()
x0 = np.array([.2, 0])
opt = FindTransitionState(x0, pot, orthogZeroEigs=None,
iprint=1,
verbosity=10, # event=print_event,
hessian_diagonalization=True
)
ret = opt.run()
self.assertFalse(ret.success)
print ret
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
|
sagivba/MachineLearningUtils
|
Examples/titanic_usecase/common_titanic.py
|
1
|
2227
|
import numpy as np
import pandas as pd
from MachineLearningUtils.UsefulMethods import UsefulMethods
from MachineLearningUtils.CommonFeatureEngineering import DataFrameManipulation
class common_titanic_things():
def __init__(self, example_number):
self.prd_lbl, self.actl_lbl = "PrdictedSurvived", "Survived"
self.example_number = example_number
self.input_path = ".\\titanic_data"
self.output_path = ".\\titanic_data\\{}".format(example_number)
self.map_dict = {
"Embarked": {'S': 1, 'C': 2, 'Q': 3, np.NaN: -99},
"Sex": {"male": 0, "female": 1}
}
self.age_estimator = None
def output_csv_name(self, file_name):
return self.output_path + "\\" + file_name
def load_data(self, file_name):
return pd.read_csv("{}\\{}".format(self.input_path, file_name))
def load_train_data(self):
return self.load_data("train.csv")
def load_test_data(self):
return self.load_data("test.csv")
def load_complete_data(self):
"""
will be used to create estimators to fill NA values
:return:
"""
df1 = self.load_train_data()
df2 = self.load_test_data()
manipulator = DataFrameManipulation(df1)
df1 = manipulator.drop_columns([self.actl_lbl])
return pd.concat([df1, df2])
def prepare_kaggle_file(self, mu, prep_data):
test_df = self.load_data("test.csv")
print("test_df:")
print(test_df.head())
test_df_prep = prep_data(test_df, self)
test_df_prep.to_csv(path_or_buf=self.output_csv_name("test_data.csv"), index=False)
print(list(test_df))
result_df = mu.test_model(test_df_prep)
print("results_df:")
print(result_df.head())
final_df = UsefulMethods.create_submition_df(test_df[["PassengerId"]], result_df[["PrdictedSurvived"]],
"PrdictedSurvived", "Survived")
# final_df=pd.concat([test_df[["PassengerId"]],result_df[["PrdictedSurvived"]]],axis=1)
print("final_df:")
print(final_df.head())
final_df.to_csv(path_or_buf=self.output_csv_name("final_df.csv"), index=False)
|
mit
|
jpmpentwater/cvxpy
|
examples/expr_trees/inpainting.py
|
12
|
3379
|
from scipy import misc
import matplotlib.pyplot as plt
import numpy as np
l = misc.lena()
l = l.astype(np.float64, copy=False)
l = l/np.max(l) #rescale pixels into [0,1]
plt.imshow(l, cmap=plt.cm.gray)
#plt.show()
from PIL import Image, ImageDraw
num_lines = 5
width = 5
imshape = l.shape
def drawRandLine(draw,width):
x = [np.random.randint(0,im.size[0]) for i in range(2)]
y = [np.random.randint(0,im.size[1]) for i in range(2)]
xy = zip(x,y)
#fill gives the color
draw.line(xy,fill=255,width=width)
im = Image.new("L",imshape)
draw = ImageDraw.Draw(im)
for i in range(num_lines):
drawRandLine(draw,width)
del draw
# im.show()
err = np.asarray(im,dtype=np.bool)
r = l.copy()
r[err] = 1.0
plt.imshow(r, cmap=plt.cm.gray)
import itertools
idx2pair = np.nonzero(err)
idx2pair = zip(idx2pair[0].tolist(), idx2pair[1].tolist())
pair2idx = dict(itertools.izip(idx2pair, xrange(len(idx2pair))))
idx2pair = np.array(idx2pair) #convert back to numpy array
import scipy.sparse as sp
from cvxopt import spmatrix
def involvedpairs(pairs):
''' Get all the pixel pairs whose gradient involves an unknown pixel.
Input should be a set or dictionary of pixel pair tuples
'''
for pair in pairs: #loop through unknown pixels
yield pair
left = (pair[0],pair[1]-1)
if left[1] >= 0 and left not in pairs: #if 'left' in picture, and not already unknown
yield left
top = (pair[0]-1,pair[1])
topright = (pair[0]-1,pair[1]+1)
#if not on top boundary, top is fixed, and top not already touched by upper right pixel
if pair[0] > 0 and top not in pairs and topright not in pairs:
yield top
def formCOO(pair2idx, img):
m, n = img.shape
Is, Js, Vs, bs = [[],[]], [[],[]], [[],[]], [[],[]]
row = 0
for pixel1 in involvedpairs(pair2idx):
bottom = (pixel1[0]+1,pixel1[1])
right= (pixel1[0],pixel1[1]+1)
for i, pixel2 in enumerate([bottom, right]):
if pixel2[0] >= m or pixel2[1] >= n:
bs[i].append(0)
continue
b = 0
for j, pix in enumerate([pixel2, pixel1]):
if pix in pair2idx: #unknown pixel
Is[i].append(row)
Js[i].append(pair2idx[pix])
Vs[i].append(pow(-1,j))
else: #known pixel
b += pow(-1,j)*img[pix]
bs[i].append(b)
row += 1
'''
Form Gx and Gy such that the x-component of the gradient is Gx*x + bx,
where x is an array representing the unknown pixel values.
'''
m = len(bs[0])
n = len(pair2idx)
Gx = spmatrix(Vs[1], Is[1], Js[1],(m,n))
Gy = spmatrix(Vs[0], Is[0], Js[0],(m,n))
bx = np.array(bs[1])
by = np.array(bs[0])
return Gx, Gy, bx, by
Gx, Gy, bx, by = formCOO(pair2idx, r)
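# Hedged consistency check, added for illustration (not in the original script):
# each gradient operator has one column per unknown pixel and one row per affected
# pixel pair, matching the lengths of the offset vectors bx and by.
assert Gx.size == Gy.size
assert Gx.size[1] == len(pair2idx)
assert len(bx) == Gx.size[0] and len(by) == Gy.size[0]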
import cvxpy as cp
m, n = Gx.size
x = cp.Variable(n)
#z = cp.vstack((x.__rmul__(Gx) + bx).T, (x.__rmul__(Gy) + by).T)
#z = cp.hstack(x.__rmul__(Gx) + bx, x.__rmul__(Gy) + by)
z = cp.Variable(m, 2)
constraints = [z[:, 0] == x.__rmul__(Gx) + bx,
z[:, 1] == x.__rmul__(Gy) + by]
objective = cp.Minimize(sum([cp.norm(z[i,:]) for i in range(m)]))
p = cp.Problem(objective, constraints)
import cProfile
cProfile.run("""
result = p.solve(solver=cp.ECOS, verbose=True)
""")
|
gpl-3.0
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/pandas/tests/frame/test_misc_api.py
|
7
|
16059
|
# -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
from copy import deepcopy
import sys
import nose
from distutils.version import LooseVersion
from pandas.compat import range, lrange
from pandas import compat
from numpy.random import randn
import numpy as np
from pandas import DataFrame, Series
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class SharedWithSparse(object):
_multiprocess_can_split_ = True
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
for attr in ('index', 'columns'):
ind = getattr(self.frame, attr)
ind.name = None
cp = self.frame.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.frame, attr).name)
def test_getitem_pop_assign_name(self):
s = self.frame['A']
self.assertEqual(s.name, 'A')
s = self.frame.pop('A')
self.assertEqual(s.name, 'A')
s = self.frame.ix[:, 'B']
self.assertEqual(s.name, 'B')
s2 = s.ix[:]
self.assertEqual(s2.name, 'B')
def test_get_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
result = self.frame.get_value(idx, col)
expected = self.frame[col][idx]
tm.assert_almost_equal(result, expected)
def test_join_index(self):
# left / right
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2)
self.assert_index_equal(f.index, joined.index)
self.assertEqual(len(joined.columns), 4)
joined = f.join(f2, how='left')
self.assert_index_equal(joined.index, f.index)
self.assertEqual(len(joined.columns), 4)
joined = f.join(f2, how='right')
self.assert_index_equal(joined.index, f2.index)
self.assertEqual(len(joined.columns), 4)
# inner
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2, how='inner')
self.assert_index_equal(joined.index, f.index.intersection(f2.index))
self.assertEqual(len(joined.columns), 4)
# outer
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2, how='outer')
self.assertTrue(tm.equalContents(self.frame.index, joined.index))
self.assertEqual(len(joined.columns), 4)
assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')
# corner case - overlapping columns
for how in ('outer', 'left', 'inner'):
with assertRaisesRegexp(ValueError, 'columns overlap but '
'no suffix'):
self.frame.join(self.frame, how=how)
def test_join_index_more(self):
af = self.frame.ix[:, ['A', 'B']]
bf = self.frame.ix[::2, ['C', 'D']]
expected = af.copy()
expected['C'] = self.frame['C'][::2]
expected['D'] = self.frame['D'][::2]
result = af.join(bf)
assert_frame_equal(result, expected)
result = af.join(bf, how='right')
assert_frame_equal(result, expected[::2])
result = bf.join(af, how='right')
assert_frame_equal(result, expected.ix[:, result.columns])
def test_join_index_series(self):
df = self.frame.copy()
s = df.pop(self.frame.columns[-1])
joined = df.join(s)
# TODO should this check_names ?
assert_frame_equal(joined, self.frame, check_names=False)
s.name = None
assertRaisesRegexp(ValueError, 'must have a name', df.join, s)
def test_join_overlap(self):
df1 = self.frame.ix[:, ['A', 'B', 'C']]
df2 = self.frame.ix[:, ['B', 'C', 'D']]
joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')
df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')
no_overlap = self.frame.ix[:, ['A', 'D']]
expected = df1_suf.join(df2_suf).join(no_overlap)
# column order not necessarily sorted
assert_frame_equal(joined, expected.ix[:, joined.columns])
def test_add_prefix_suffix(self):
with_prefix = self.frame.add_prefix('foo#')
expected = pd.Index(['foo#%s' % c for c in self.frame.columns])
self.assert_index_equal(with_prefix.columns, expected)
with_suffix = self.frame.add_suffix('#foo')
expected = pd.Index(['%s#foo' % c for c in self.frame.columns])
self.assert_index_equal(with_suffix.columns, expected)
class TestDataFrameMisc(tm.TestCase, SharedWithSparse, TestData):
klass = DataFrame
_multiprocess_can_split_ = True
def test_get_axis(self):
f = self.frame
self.assertEqual(f._get_axis_number(0), 0)
self.assertEqual(f._get_axis_number(1), 1)
self.assertEqual(f._get_axis_number('index'), 0)
self.assertEqual(f._get_axis_number('rows'), 0)
self.assertEqual(f._get_axis_number('columns'), 1)
self.assertEqual(f._get_axis_name(0), 'index')
self.assertEqual(f._get_axis_name(1), 'columns')
self.assertEqual(f._get_axis_name('index'), 'index')
self.assertEqual(f._get_axis_name('rows'), 'index')
self.assertEqual(f._get_axis_name('columns'), 'columns')
self.assertIs(f._get_axis(0), f.index)
self.assertIs(f._get_axis(1), f.columns)
assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)
assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')
assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)
assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number,
None)
def test_keys(self):
getkeys = self.frame.keys
self.assertIs(getkeys(), self.frame.columns)
def test_column_contains_typeerror(self):
try:
self.frame.columns in self.frame
except TypeError:
pass
def test_not_hashable(self):
df = pd.DataFrame([1])
self.assertRaises(TypeError, hash, df)
self.assertRaises(TypeError, hash, self.empty)
def test_new_empty_index(self):
df1 = DataFrame(randn(0, 3))
df2 = DataFrame(randn(0, 3))
df1.index.name = 'foo'
self.assertIsNone(df2.index.name)
def test_array_interface(self):
with np.errstate(all='ignore'):
result = np.sqrt(self.frame)
tm.assertIsInstance(result, type(self.frame))
self.assertIs(result.index, self.frame.index)
self.assertIs(result.columns, self.frame.columns)
assert_frame_equal(result, self.frame.apply(np.sqrt))
def test_get_agg_axis(self):
cols = self.frame._get_agg_axis(0)
self.assertIs(cols, self.frame.columns)
idx = self.frame._get_agg_axis(1)
self.assertIs(idx, self.frame.index)
self.assertRaises(ValueError, self.frame._get_agg_axis, 2)
def test_nonzero(self):
self.assertTrue(self.empty.empty)
self.assertFalse(self.frame.empty)
self.assertFalse(self.mixed_frame.empty)
# corner case
df = DataFrame({'A': [1., 2., 3.],
'B': ['a', 'b', 'c']},
index=np.arange(3))
del df['A']
self.assertFalse(df.empty)
def test_iteritems(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
for k, v in compat.iteritems(df):
self.assertEqual(type(v), Series)
def test_iter(self):
self.assertTrue(tm.equalContents(list(self.frame), self.frame.columns))
def test_iterrows(self):
for i, (k, v) in enumerate(self.frame.iterrows()):
exp = self.frame.xs(self.frame.index[i])
assert_series_equal(v, exp)
for i, (k, v) in enumerate(self.mixed_frame.iterrows()):
exp = self.mixed_frame.xs(self.mixed_frame.index[i])
assert_series_equal(v, exp)
def test_itertuples(self):
for i, tup in enumerate(self.frame.itertuples()):
s = Series(tup[1:])
s.name = tup[0]
expected = self.frame.ix[i, :].reset_index(drop=True)
assert_series_equal(s, expected)
df = DataFrame({'floats': np.random.randn(5),
'ints': lrange(5)}, columns=['floats', 'ints'])
for tup in df.itertuples(index=False):
tm.assertIsInstance(tup[1], np.integer)
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[['a', 'a']]
self.assertEqual(list(dfaa.itertuples()), [
(0, 1, 1), (1, 2, 2), (2, 3, 3)])
self.assertEqual(repr(list(df.itertuples(name=None))),
'[(0, 1, 4), (1, 2, 5), (2, 3, 6)]')
tup = next(df.itertuples(name='TestName'))
# no support for field renaming in Python 2.6, regular tuples are
# returned
if sys.version >= LooseVersion('2.7'):
self.assertEqual(tup._fields, ('Index', 'a', 'b'))
self.assertEqual((tup.Index, tup.a, tup.b), tup)
self.assertEqual(type(tup).__name__, 'TestName')
df.columns = ['def', 'return']
tup2 = next(df.itertuples(name='TestName'))
self.assertEqual(tup2, (0, 1, 4))
if sys.version >= LooseVersion('2.7'):
self.assertEqual(tup2._fields, ('Index', '_1', '_2'))
df3 = DataFrame(dict(('f' + str(i), [i]) for i in range(1024)))
# will raise SyntaxError if trying to create namedtuple
tup3 = next(df3.itertuples())
self.assertFalse(hasattr(tup3, '_fields'))
self.assertIsInstance(tup3, tuple)
def test_len(self):
self.assertEqual(len(self.frame), len(self.frame.index))
def test_as_matrix(self):
frame = self.frame
mat = frame.as_matrix()
frameCols = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frameCols[j]
if np.isnan(value):
self.assertTrue(np.isnan(frame[col][i]))
else:
self.assertEqual(value, frame[col][i])
# mixed type
mat = self.mixed_frame.as_matrix(['foo', 'A'])
self.assertEqual(mat[0, 0], 'bar')
df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})
mat = df.as_matrix()
self.assertEqual(mat[0, 0], 1j)
# single block corner case
mat = self.frame.as_matrix(['A', 'B'])
expected = self.frame.reindex(columns=['A', 'B']).values
assert_almost_equal(mat, expected)
def test_values(self):
self.frame.values[:, 0] = 5.
self.assertTrue((self.frame.values[:, 0] == 5).all())
def test_deepcopy(self):
cp = deepcopy(self.frame)
series = cp['A']
series[:] = 10
for idx, value in compat.iteritems(series):
self.assertNotEqual(self.frame['A'][idx], value)
# ---------------------------------------------------------------------
# Transposing
def test_transpose(self):
frame = self.frame
dft = frame.T
for idx, series in compat.iteritems(dft):
for col, value in compat.iteritems(series):
if np.isnan(value):
self.assertTrue(np.isnan(frame[col][idx]))
else:
self.assertEqual(value, frame[col][idx])
# mixed type
index, data = tm.getMixedTypeDict()
mixed = DataFrame(data, index=index)
mixed_T = mixed.T
for col, s in compat.iteritems(mixed_T):
self.assertEqual(s.dtype, np.object_)
def test_transpose_get_view(self):
dft = self.frame.T
dft.values[:, 5:10] = 5
self.assertTrue((self.frame.values[5:10] == 5).all())
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
assert_frame_equal(df.T, df.swapaxes(0, 1))
assert_frame_equal(df.T, df.swapaxes(1, 0))
assert_frame_equal(df, df.swapaxes(0, 0))
self.assertRaises(ValueError, df.swapaxes, 2, 5)
def test_axis_aliases(self):
f = self.frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis='index')
assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis='columns')
assert_series_equal(result, expected)
def test_more_asMatrix(self):
values = self.mixed_frame.as_matrix()
self.assertEqual(values.shape[1], len(self.mixed_frame.columns))
def test_repr_with_mi_nat(self):
df = DataFrame({'X': [1, 2]},
index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])
res = repr(df)
exp = ' X\nNaT a 1\n2013-01-01 b 2'
self.assertEqual(res, exp)
def test_iterkv_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
self.mixed_float.iterkv()
def test_iterkv_names(self):
for k, v in compat.iteritems(self.mixed_frame):
self.assertEqual(v.name, k)
def test_series_put_names(self):
series = self.mixed_frame._series
for k, v in compat.iteritems(series):
self.assertEqual(v.name, k)
def test_empty_nonzero(self):
df = DataFrame([1, 2, 3])
self.assertFalse(df.empty)
df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()
self.assertTrue(df.empty)
self.assertTrue(df.T.empty)
def test_inplace_return_self(self):
# re #1893
data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],
'b': [0, 0, 1, 1],
'c': [1, 2, 3, 4]})
def _check_f(base, f):
result = f(base)
self.assertTrue(result is None)
# -----DataFrame-----
# set_index
f = lambda x: x.set_index('a', inplace=True)
_check_f(data.copy(), f)
# reset_index
f = lambda x: x.reset_index(inplace=True)
_check_f(data.set_index('a'), f)
# drop_duplicates
f = lambda x: x.drop_duplicates(inplace=True)
_check_f(data.copy(), f)
# sort
f = lambda x: x.sort_values('b', inplace=True)
_check_f(data.copy(), f)
# sort_index
f = lambda x: x.sort_index(inplace=True)
_check_f(data.copy(), f)
# sortlevel
f = lambda x: x.sortlevel(0, inplace=True)
_check_f(data.set_index(['a', 'b']), f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(data.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(data.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(data.copy(), f)
# -----Series-----
d = data.copy()['c']
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
_check_f(data.set_index('a')['c'], f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(d.copy(), f)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
mit
|
JesseLivezey/plankton
|
pylearn2/train_extensions/roc_auc.py
|
15
|
4888
|
"""
TrainExtension subclass for calculating ROC AUC scores on monitoring
dataset(s), reported via monitor channels.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
__maintainer__ = "Steven Kearnes"
import numpy as np
try:
from sklearn.metrics import roc_auc_score
except ImportError:
roc_auc_score = None
import theano
from theano import gof, config
from theano import tensor as T
from pylearn2.train_extensions import TrainExtension
class RocAucScoreOp(gof.Op):
"""
Theano Op wrapping sklearn.metrics.roc_auc_score.
Parameters
----------
name : str, optional (default 'roc_auc')
Name of this Op.
use_c_code : WRITEME
"""
def __init__(self, name='roc_auc', use_c_code=theano.config.cxx):
super(RocAucScoreOp, self).__init__(use_c_code)
self.name = name
def make_node(self, y_true, y_score):
"""
Calculate ROC AUC score.
Parameters
----------
y_true : tensor_like
Target class labels.
y_score : tensor_like
Predicted class labels or probabilities for positive class.
"""
y_true = T.as_tensor_variable(y_true)
y_score = T.as_tensor_variable(y_score)
output = [T.scalar(name=self.name, dtype=config.floatX)]
return gof.Apply(self, [y_true, y_score], output)
def perform(self, node, inputs, output_storage):
"""
Calculate ROC AUC score.
Parameters
----------
node : Apply instance
Symbolic inputs and outputs.
inputs : list
Sequence of inputs.
output_storage : list
List of mutable 1-element lists.
"""
if roc_auc_score is None:
raise RuntimeError("Could not import from sklearn.")
y_true, y_score = inputs
try:
roc_auc = roc_auc_score(y_true, y_score)
except ValueError:
roc_auc = np.nan
output_storage[0][0] = theano._asarray(roc_auc, dtype=config.floatX)
class RocAucChannel(TrainExtension):
"""
Adds a ROC AUC channel to the monitor for each monitoring dataset.
This monitor will return nan unless both classes are represented in
y_true. For this reason, it is recommended to set monitoring_batches
to 1, especially when using unbalanced datasets.
Parameters
----------
channel_name_suffix : str, optional (default 'roc_auc')
Channel name suffix.
positive_class_index : int, optional (default 1)
Index of positive class in predicted values.
negative_class_index : int or None, optional (default None)
Index of negative class in predicted values for calculation of
one vs. one performance. If None, uses all examples not in the
positive class (one vs. the rest).
"""
def __init__(self, channel_name_suffix='roc_auc', positive_class_index=1,
negative_class_index=None):
self.channel_name_suffix = channel_name_suffix
self.positive_class_index = positive_class_index
self.negative_class_index = negative_class_index
def setup(self, model, dataset, algorithm):
"""
Add ROC AUC channels for monitoring dataset(s) to model.monitor.
Parameters
----------
model : object
The model being trained.
dataset : object
Training dataset.
algorithm : object
Training algorithm.
"""
m_space, m_source = model.get_monitoring_data_specs()
state, target = m_space.make_theano_batch()
y = T.argmax(target, axis=1)
y_hat = model.fprop(state)[:, self.positive_class_index]
# one vs. the rest
if self.negative_class_index is None:
y = T.eq(y, self.positive_class_index)
# one vs. one
else:
pos = T.eq(y, self.positive_class_index)
neg = T.eq(y, self.negative_class_index)
keep = T.add(pos, neg).nonzero()
y = T.eq(y[keep], self.positive_class_index)
y_hat = y_hat[keep]
roc_auc = RocAucScoreOp(self.channel_name_suffix)(y, y_hat)
roc_auc = T.cast(roc_auc, config.floatX)
for dataset_name, dataset in algorithm.monitoring_dataset.items():
if dataset_name:
channel_name = '{0}_{1}'.format(dataset_name,
self.channel_name_suffix)
else:
channel_name = self.channel_name_suffix
model.monitor.add_channel(name=channel_name,
ipt=(state, target),
val=roc_auc,
data_specs=(m_space, m_source),
dataset=dataset)
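# A hedged usage sketch added for illustration; it is not part of the original
# module. It shows one plausible way to attach this extension when a Train object
# is built directly in Python rather than through YAML; the dataset, model and
# algorithm arguments are placeholders supplied by the caller.
def _example_attach_roc_auc_channel(dataset, model, algorithm):
    from pylearn2.train import Train
    extensions = [RocAucChannel(channel_name_suffix='roc_auc',
                                positive_class_index=1)]
    return Train(dataset=dataset, model=model, algorithm=algorithm,
                 extensions=extensions)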
|
bsd-3-clause
|
rahul-c1/scikit-learn
|
sklearn/tests/test_grid_search.py
|
8
|
26766
|
"""
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import distributions
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""A LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
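# Added note (hedged, not part of the original tests): GridSearchCV only relies on
# the duck-typed fit/predict/score/get_params/set_params interface above, e.g.
# GridSearchCV(MockClassifier(), {'foo_param': [1, 2, 3]}).fit(X, y)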
def test_parameter_grid():
"""Test basic properties of ParameterGrid."""
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
has_empty = ParameterGrid([{'C': [1, 10]}, {}])
assert_equal(len(has_empty), 3)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}])
def test_grid_search():
"""Test that the best estimator contains the right value for foo_param"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
"""Test search over a "grid" with only one point.
Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]})
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
"""Test that grid search can be used for model selection only"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
"""Test that grid search will capture errors on data with different
length"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
"""Test that grid search works with both dense and sparse matrices"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
#np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
"""Test that grid search works when the input features are given in the
form of a precomputed kernel matrix """
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
"""Test that grid search returns an error with a non-square precomputed
training kernel matrix"""
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
"""Test that grid search returns an error when using a kernel_function"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
"""Regression test for bug in refitting
Simulates re-fitting a broken estimator; this used to break with
sparse SVMs.
"""
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_X_as_list():
"""Pass X as list in GridSearchCV"""
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
"""Pass y as list in GridSearchCV"""
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_bad_estimator():
# test grid-search with clustering algorithm which doesn't support
# "predict"
sc = SpectralClustering()
grid_search = GridSearchCV(sc, param_grid=dict(gamma=[.1, 1, 10]),
scoring='ari')
assert_raise_message(TypeError, "'score' or a 'predict'", grid_search.fit,
[[1]])
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": distributions.uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=distributions.expon(scale=10),
gamma=distributions.expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
"""Test that a fit search can be pickled"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
""" Test search with multi-output estimator"""
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters, cv=cv)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
"""Test predict_proba when disabled on estimator."""
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
""" Test GridSearchCV with Imputer """
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
"""GridSearchCV with on_error != 'raise'
Ensures that a warning is raised and score reset where appropriate.
"""
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
"""GridSearchCV with on_error == 'raise' raises the error"""
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
|
bsd-3-clause
|
KennyCandy/HAR
|
_module45/CCPCCC_16_32.py
|
2
|
18306
|
# Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
# quoc_trinh
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
import datetime
# get current file_name as [0] of array
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")
# FLAG indicating whether this is the training process or not.
FLAG = 'train'
POOL_X = 16
POOL_Y = 18
N_HIDDEN_CONFIG = 32
save_path_name = file_name + "/model.ckpt"
print(datetime.datetime.now())
# Write to file: time to start, type, time to end
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now())+'\n')
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "../data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
row.replace('  ', ' ').strip().split(' ') for row in file
]]
)
file.close()
"""Examples
--------
>> > x = np.arange(4).reshape((2, 2))
>> > x
array([[0, 1],
[2, 3]])
>> > np.transpose(x)
array([[0, 2],
[1, 3]])
>> > x = np.ones((1, 2, 3))
>> > np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
X_test = load_X(X_test_signals_paths) # [2947, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
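# Added note (hedged): the reshape keeps the sample count unchanged because
# 128 * 9 == 32 * 36 == 1152 values per sample; only the (time_steps, features)
# factorisation changes from (128, 9) to (32, 36).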
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
"""convert label from dense to one hot
argument:
label: ndarray dense label, shape: [sample_num, 1]
return:
one_hot_label: ndarray one hot, shape: [sample_num,n_class]
"""
label_num = len(label)
new_label = label.reshape(label_num) # shape : [sample_num]
# because max is 5, and we will create 6 columns
n_values = np.max(new_label) + 1
return np.eye(n_values)[np.array(new_label, dtype=np.int32)]
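# Illustrative example (added, hedged): with labels [[0], [2], [1]] the call
# one_hot(np.array([[0], [2], [1]])) yields max+1 = 3 columns:
# [[1., 0., 0.],
#  [0., 0., 1.],
#  [0., 1., 0.]]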
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
row.replace('  ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
# Subtract 1 from each output class for friendly 0-based indexing
return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
"""
define a class to store parameters,
the input should be feature mat of training and testing
"""
def __init__(self, X_train, X_test):
# Input data
self.train_count = len(X_train) # 7352 training series
self.test_data_count = len(X_test) # 2947 testing series
self.n_steps = len(X_train[0]) # time steps per series (32 here, after the reshape above)
# Training
self.learning_rate = 0.0025
self.lambda_loss_amount = 0.0015
self.training_epochs = 300
self.batch_size = 1000
# LSTM structure
self.n_inputs = len(X_train[0][0]) # features per time step (36 here, after the reshape above; originally 9 from three 3D sensors)
self.n_hidden = N_HIDDEN_CONFIG # nb of neurons inside the neural network
self.n_classes = 6 # Final output classes
self.W = {
'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])), # [9, 32]
'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])) # [32, 6]
}
self.biases = {
'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)), # [32]
'output': tf.Variable(tf.random_normal([self.n_classes])) # [6]
}
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
# returns a random value drawn from a truncated normal distribution
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def bias_varibale(shape):
initial = tf.constant(0.1, shape=shape, name='Bias')
return tf.Variable(initial)
# Convolution and Pooling
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
# For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', name='max_pool')
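# Added note (hedged): with 'SAME' padding the 3x3 convolutions preserve the
# 32x36 spatial size, and the single max_pool_2x2 halves it to 16x18, which is
# where POOL_X = 16 and POOL_Y = 18 defined above come from.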
def LSTM_Network(feature_mat, config):
"""Model an LSTM network.
It stacks 2 LSTM layers, each with n_hidden=32 cells,
and 1 output layer, which is a fully connected layer.
argument:
feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
config: class containing config of network
return:
: matrix output shape [batch_size,n_classes]
"""
W_conv1 = weight_variable([3, 3, 1, 16])
b_conv1 = bias_varibale([16])
# x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
print("----feature_mat_image-----")
print(feature_mat_image.get_shape())
h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
h_pool1 = h_conv1
# Second Convolutional Layer
W_conv2 = weight_variable([3, 3, 16, 16])
b_conv2 = weight_variable([16])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# Third Convolutional Layer
W_conv3 = weight_variable([3, 3, 16, 64])
b_conv3 = weight_variable([64])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = (h_conv3)
# Forth Convolutional Layer
W_conv4 = weight_variable([3, 3, 64, 64])
b_conv4 = weight_variable([64])
h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
h_pool4 = (h_conv4)
# Fifth Convolutional Layer
W_conv5 = weight_variable([3, 3, 64, 64])
b_conv5 = weight_variable([64])
h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)
h_pool5 = h_conv5
# Sixth Convolutional Layer
W_conv6 = weight_variable([3, 3, 64, 1])
b_conv6 = weight_variable([1])
h_conv6 = tf.nn.relu(conv2d(h_pool5, W_conv6) + b_conv6)
h_pool6 = h_conv6
h_pool6 = tf.reshape(h_pool6, shape=[-1, POOL_X, POOL_Y])
feature_mat = h_pool6
print("----feature_mat-----")
print(feature_mat)
# exit()
# W_fc1 = weight_variable([8 * 9 * 1, 1024])
# b_fc1 = bias_varibale([1024])
# h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# print("----h_fc1_drop-----")
# print(h_fc1)
# exit()
#
# # keep_prob = tf.placeholder(tf.float32)
# keep_prob = tf.placeholder(1.0)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
# print("----h_fc1_drop-----")
# print(h_fc1_drop)
# exit()
#
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_varibale([10])
#
# y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# print("----y_conv-----")
# print(y_conv)
# exit()
# Exchange dim 1 and dim 0
# Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36]
feature_mat = tf.transpose(feature_mat, [1, 0, 2])
# New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9]
print("----feature_mat-----")
print(feature_mat)
# exit()
# Temporarily crush the feature_mat's dimensions
feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs]) # 9
# New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9
# Linear activation, reshaping inputs to the LSTM's number of hidden:
hidden = tf.nn.relu(tf.matmul(
feature_mat, config.W['hidden']
) + config.biases['hidden'])
# New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32]
print("--n_steps--")
print(config.n_steps)
print("--hidden--")
print(hidden)
# Split the series because the rnn cell needs time_steps features, each of shape:
hidden = tf.split(0, config.n_steps/4, hidden) # (0, 128, [128*batch_size, 32])
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
# Stack two LSTM layers, both layers has the same shape
lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
# Get LSTM outputs, the states are internal to the LSTM cells,they are not our attention here
outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
# outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32]
print("------------------last outputs-------------------")
print (lstm_last_output)
# Linear activation
return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if FLAG == 'train':  # training mode
with tf.Session() as sess:
# tf.initialize_all_variables().run()
sess.run(init) # .run()
f.write("---Save model \n")
# Start training for each batch and loop epochs
for i in range(config.training_epochs):
for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 1500)
range(config.batch_size, config.train_count + 1,
config.batch_size)): # (1500, 7353, 1500)
print(start)
print(end)
sess.run(train_step, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
print("traing iter: {},".format(i) + \
" test accuracy : {},".format(accuracy_out) + \
" loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
# Save the model in this session
save_path = saver.save(sess, file_name + "/model.ckpt")
print("Model saved in file: %s" % save_path)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
else:
# Running a new session
print("Starting 2nd session...")
with tf.Session() as sess:
# Initialize variables
sess.run(init)
f.write("---Restore model \n")
# Restore model weights from previously saved model
saver.restore(sess, file_name+ "/model.ckpt")
print("Model restored from file: %s" % save_path_name)
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
# print("traing iter: {}," + \
# " test accuracy : {},".format(accuracy_out) + \
# " loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
f.write("Ended at \n")
f.write(str(datetime.datetime.now())+'\n')
f.write("------------- \n")
f.close()
|
mit
|
gabrevaya/Canto5
|
testing/testing_ger_apa.py
|
1
|
2439
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 7 16:34:20 2017
@author: ger
"""
import numpy as np
import matplotlib.pyplot as plt
#import scipy
#import scipy.io.wavfile as sw
from scipy import signal
from scipy import interpolate
#def envolvente(raw_audio,times,sample_rate,data_points,bin_size=100,mean=0,der=0):
def envolvente(raw_audio,times,sample_rate,data_points=None,bin_size=None,mean=0,der=0):
"""
    Finds a wrapping (envelope) signal of a raw audio wave by dividing the set of points
    into packages of a given number of points (bin_size) and computing the max or mean
    value of the signal in each one of them.
    Input: raw_audio, timebase, sample rate and total number of data points of the audio
    file (output of read_wav.py executed on a single file). Optional: specify bin_size
    (data_points/1e3 by default); set mean=1 to use the mean instead of the maximum
    (mean=0, default) of the data points in each bin; set der=1 to differentiate the
    resulting signal (0 by default).
    Output: the resultant wrapping (envelope) signal
"""
if data_points is None:
data_points=len(times)
if bin_size is None:
bin_size=int(data_points/1e3)
#[sample_rate, raw_audio] = sw.read(file)
#raw_audio = np.array(raw_audio)
#data_points = len(raw_audio)
t_fin = data_points/sample_rate
#times = np.arange(0,t_fin,1/sample_rate)
#peakind = signal.find_peaks_cwt(raw_audio, np.arange(1,5))
#pospeakind = [i for i in peakind if raw_audio[i] > 0]
#peaks = np.zeros(len(raw_audio))
#peaks[pospeakind] = raw_audio[pospeakind]
#bin_size = 100
num_of_bins = int(data_points/bin_size)
new_len = num_of_bins*bin_size
segmented = np.reshape(raw_audio[:new_len],[num_of_bins,bin_size])
if mean==1:
smooth_wave = np.mean(np.abs(segmented), axis = 1)
else:
smooth_wave = np.max(segmented, axis = 1)
t_start = int((bin_size*0.5))-1
t_new = times[t_start:new_len:bin_size]
tck = interpolate.splrep(t_new, smooth_wave, s=0)
envolvente = interpolate.splev(times, tck, der)
plt.figure(figsize=(30,10))
plt.plot(times,raw_audio)
#plt.plot(t_new,smooth_wave)
plt.plot(times, envolvente, linewidth=3)
plt.xlabel('tiempo (s)')
plt.ylabel('amplitud')
#plt.plot(times,peaks,'.')
#return envolvente, times, ..
    return envolvente
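# --- Minimal usage sketch (added for illustration; the synthetic tone and the parameter
# --- values below are assumptions, not part of the original analysis) ---
if __name__ == '__main__':
    sample_rate = 44100
    times = np.arange(0, 1.0, 1.0 / sample_rate)
    # a 440 Hz tone with a slowly varying amplitude stands in for a real recording
    raw_audio = np.sin(2 * np.pi * 440 * times) * np.hanning(len(times))
    envolvente(raw_audio, times, sample_rate, bin_size=441, mean=1)
    plt.show()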
|
gpl-3.0
|
Kaspect/polar
|
main.py
|
1
|
1895
|
import pandas as pd
import numpy as np
def main():
polar_data = download_s3_data_from_polar()
file_freq = byte_frequency_analysis("samplefile.txt")
byte_frequency_distribution_correlation()
byte_frequency_cross_correlation()
file_header_trailer()
produce_json_output_for_d3()
produce_json_output_for_pie_chart()
run_tika_simularity(polar_data, distances=['jaccard','cosine','edit_similarity'])
run_content_based_mime_detector(polar_data)
#@param path_to_file some file that we want a byte frequency analysis of
#@return fingerprint 2D dataset with columns 'byte_length' and 'count'
#fingerprint will be in this format: pandas.DataFrame([0, 1,2,3,4,5], [43,26,33,25,32,1])
def byte_frequency_analysis(path_to_file):
return 0
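# A hedged sketch of what byte_frequency_analysis could look like once implemented.
# The helper name and the exact DataFrame layout are assumptions, not project API;
# it simply counts how often each of the 256 byte values occurs in the file.
def _byte_frequency_analysis_sketch(path_to_file):
    with open(path_to_file, 'rb') as fh:
        data = np.frombuffer(fh.read(), dtype=np.uint8)
    counts = np.bincount(data, minlength=256)
    # columns mirror the fingerprint format described above: byte value and its count
    return pd.DataFrame({'byte_length': np.arange(256), 'count': counts})
# e.g. _byte_frequency_analysis_sketch("samplefile.txt") would produce the 256-row fingerprint shape expected above.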
#@param path_to_file some file that we want a byte frequency correlations of
#@param fingerprint_array list of fingerprints, each with an identifier (see byte_frequency_cross_correlation)
#@return ????????????????
def byte_frequency_distribution_correlation(path_to_file,fingerprint_array):
return 0
#@param path_to_file some file that we want a byte frequency correlations of
#@param fingerprint_array list of fingerprints, each with an identifier
#in this format:
#[
# ('type1', pandas.DataFrame([0, 1,2,3,4,5], [43,26,33,25,32,1])),
# ('type2', pandas.DataFrame([0, 1,2,3,4,5], [43,26,33,25,32,1])),
# ('type3', pandas.DataFrame([0, 1,2,3,4,5], [43,26,33,25,32,1]))
# ]
#@return ????????????????
def byte_frequency_cross_correlation(path_to_file,fingerprint_array):
return 0
#@param path_to_file string path to the file for analysis
#@param bytes_to_analyze either 4,8,or 16 as an integer, indicating how many of the first bytes we should analyze
#@return sparse densematrix of what?????????????
def file_header_trailer(path_to_file,bytes_to_analyze):
return 0
def download_s3_data_from_polar():
return(0)
main()
|
apache-2.0
|
soazig/project-epsilon-1
|
code/utils/scripts/glm_script.py
|
2
|
4810
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
import numpy as np
from glm import *
#from convolution_normal_script import X_matrix
#from convolution_high_res_script import X_matrix_high_res
#from load_BOLD import *
import nibabel as nib
import matplotlib.pyplot as plt
from smoothing import *
# Create the necessary directories if they do not exist
dirs = ['../../../txt_output/mrss',\
'../../../fig/glm_fitted']
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
# Locate the different paths
project_path = '../../../'
# TODO: change it to relevant path
conv_path = project_path + 'txt_output/conv_normal/'
conv_high_res_path = project_path + 'txt_output/conv_high_res/'
# select your own subject
subject_list = [str(i) for i in range(1,17)]
#subject_list = ['1','5']
run_list = [str(i) for i in range(1,2)]
conv_list = [str(i) for i in range(1,5)]
txt_paths = [('ds005_sub' + s.zfill(3) + '_t1r' + r +'_conv'+ c.zfill(3),\
conv_path + 'ds005_sub' + s.zfill(3) + '_t1r' + r +'_conv001_canonical.txt', \
conv_path + 'ds005_sub' + s.zfill(3) + '_t1r' + r +'_conv002_canonical.txt', \
conv_path + 'ds005_sub' + s.zfill(3) + '_t1r' + r +'_conv003_canonical.txt', \
conv_path + 'ds005_sub' + s.zfill(3) + '_t1r' + r +'_conv004_canonical.txt', \
'../../../data/ds005/sub' + s.zfill(3) + '/BOLD/task001_run' \
+ r.zfill(3) + '/bold.nii.gz',\
conv_high_res_path + 'ds005_sub' + s.zfill(3) + '_t1r' + r +'_conv_001_high_res.txt',\
conv_high_res_path + 'ds005_sub' + s.zfill(3) + '_t1r' + r +'_conv_002_high_res.txt',\
conv_high_res_path + 'ds005_sub' + s.zfill(3) + '_t1r' + r +'_conv_003_high_res.txt',\
conv_high_res_path + 'ds005_sub' + s.zfill(3) + '_t1r' + r +'_conv_004_high_res.txt') \
for r in run_list \
for s in subject_list \
for c in conv_list]
print("\n====================================================")
for txt_path in txt_paths:
# get 4_d image data
name = txt_path[0]
print("Starting glm analysis for subject " +name[9:12]+ " condition " + name[24])
img = nib.load(txt_path[5])
data_int = img.get_data()
data = data_int.astype(float)
p = 5
    # p is the number of columns in our design matrix:
    # the four convolved regressors plus one column of ones
X_matrix1 = np.loadtxt(txt_path[1])
X_matrix2 = np.loadtxt(txt_path[2])
X_matrix3 = np.loadtxt(txt_path[3])
X_matrix4 = np.loadtxt(txt_path[4])
X_matrix = np.ones((len(X_matrix1),p))
X_matrix[...,1] = X_matrix1
X_matrix[...,2] = X_matrix2
X_matrix[...,3] = X_matrix3
X_matrix[...,4] = X_matrix4
X_matrix_high_res1 = np.loadtxt(txt_path[6])
X_matrix_high_res2 = np.loadtxt(txt_path[7])
X_matrix_high_res3 = np.loadtxt(txt_path[8])
X_matrix_high_res4 = np.loadtxt(txt_path[9])
X_matrix_high_res = np.ones((len(X_matrix1),p))
X_matrix_high_res[...,1] = X_matrix_high_res1
X_matrix_high_res[...,2] = X_matrix_high_res2
X_matrix_high_res[...,3] = X_matrix_high_res3
X_matrix_high_res[...,4] = X_matrix_high_res4
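    # glm_beta and glm_mrss come from ../functions/glm.py (imported via `from glm import *`);
    # from their use here they presumably fit the least-squares model data = X * beta
    # voxel-wise and return the mean residual sum of squares, fitted values and residuals.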
beta_4d = glm_beta(data,X_matrix)
MRSS, fitted, residuals = glm_mrss(beta_4d, X_matrix, data)
# smooth the data and re-run the regression
data_smooth = smoothing(data,1,range(data.shape[-1]))
beta_4d_smooth = glm_beta(data_smooth,X_matrix)
MRSS_smooth, fitted_smooth, residuals_smooth = glm_mrss(beta_4d_smooth, X_matrix, data_smooth)
# use high resolution to create our design matrix
beta_4d_high_res = glm_beta(data,X_matrix_high_res)
MRSS_high_res, fitted_high_res, residuals_high_res = glm_mrss(beta_4d_high_res, X_matrix_high_res, data)
plt.plot(data[4,22,11], label = "actual")
plt.plot(fitted[4,22,11], label = "fitted")
plt.plot(fitted_high_res[4,22,11], label = "fitted_high_res")
plt.title(name[0:17]+"voxel (4,22,11) actual vs fitted")
plt.legend(loc = "upper left", fontsize = "smaller")
plt.savefig(dirs[1] + '/'+ name[0:17]+ "_glm_fitted.png")
plt.close()
location_of_txt= dirs[0]
file = open(location_of_txt+ '/' + name[0:17] +'_mrss_result.txt', "w")
file.write("MRSS of multiple regression for" +name[0:17]+ " is: "+str(np.mean(MRSS))+"\n")
file.write("\n")
file.write("MRSS of multiple regression for" +name[0:17]+ " using the smoothed data is: "+str(np.mean(MRSS_smooth))+"\n")
file.write("\n")
file.write("MRSS of multiple regression for" +name[0:17]+ " using high_res design matrix is: "+str(np.mean(MRSS_high_res))+"\n")
file.close()
print("GLM analysis done.")
print("See MRSS project-epsilon/txt_output/mrss/" + name[0:17])
|
bsd-3-clause
|
qianfengzh/ML-source-code
|
algorithms/decisionTrees/trees.py
|
1
|
4520
|
#-*-coding=utf-8-*-
#-----------------------
# Named: Decision Tree
# Created: 2016-07-10
# @Author: Qianfeng
#-----------------------
from math import log
import operator
import pickle
from sklearn.tree import DecisionTreeClassifier
def createDataSet():
dataSet = [[1,1,'yes'],[1,1,'yes'],[1,0,'no'],[0,1,'no'],[0,1,'no']]
labels = ['no surfacing', 'flippers']
return dataSet, labels
def calcShannonEnt(dataSet):
"""
    Compute the Shannon entropy of the class labels in dataSet.
"""
numEntries = len(dataSet)
labelCounts = {}
for featVect in dataSet:
currentLabel = featVect[-1]
labelCounts[currentLabel] = labelCounts.get(currentLabel,0)+1
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key])/numEntries
shannonEnt -= prob * log(prob,2)
return shannonEnt
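# Illustrative check (comment only): for the toy data from createDataSet(), with two
# 'yes' and three 'no' labels, the entropy is
#     -(2/5)*log(2/5, 2) - (3/5)*log(3/5, 2) ~= 0.971
# so calcShannonEnt(createDataSet()[0]) should return roughly 0.971.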
def splitDataSet(dataSet, axis, value):
"""
    Split dataSet on the given feature axis: keep the rows whose value on that axis equals 'value', with that feature removed.
"""
retDataSet = []
for featVect in dataSet:
if featVect[axis] == value:
reducedFeatVec = featVect[:axis]
reducedFeatVec.extend(featVect[axis+1:])
retDataSet.append(reducedFeatVec)
return retDataSet
def chooseBestFeatureToSplit(dataSet):
"""
    Choose the best feature to split on, by information gain.
"""
numFeatures = len(dataSet[0]) - 1
baseEntropy = calcShannonEnt(dataSet)
bestInfoGain = 0.0; bestFeature = -1
for i in range(numFeatures):
featList = [example[i] for example in dataSet]
uniqueValues = set(featList)
newEntropy = 0.0
for value in uniqueValues:
subDataSet = splitDataSet(dataSet, i, value)
prob = len(subDataSet)/float(len(dataSet))
newEntropy += prob * calcShannonEnt(subDataSet)
infoGain = baseEntropy - newEntropy
if (infoGain > bestInfoGain):
bestInfoGain = infoGain
bestFeature = i
return bestFeature
def Gini(dataSet):
    """
    Gini impurity of the class labels in dataSet.
    """
    labelsList = [example[-1] for example in dataSet]
    labelsCount = len(dataSet)
    giniValue = 0.0
    for value in set(labelsList):
        giniValue += pow(labelsList.count(value) / float(labelsCount), 2)
    return 1 - giniValue
def chooseBestFeatureToSplitByGini(dataSet):
numFeatures = len(dataSet[0]) - 1
baseGini = Gini(dataSet)
bestGiniGain = 0.0; bestFeature = -1
for i in range(numFeatures):
featList = [example[i] for example in dataSet]
uniqueValues = set(featList)
newGini = 0.0
for value in uniqueValues:
subDataSet = splitDataSet(dataSet, i, value)
prob = len(subDataSet) / float(len(dataSet))
newGini += prob * Gini(subDataSet)
giniGain = baseGini - newGini
if (giniGain > bestGiniGain):
bestGiniGain = giniGain
bestFeature = i
return bestFeature
def majorityCnt(classList):
"""
    Majority-vote strategy for leaf nodes.
"""
classCount = {}
for vote in classList:
classCount[vote] = classCount.get(vote,0) + 1
sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
def createTree(dataSet, labels):
    """
    Recursively build the decision tree.
    """
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):  # all labels identical: stop splitting
        return classList[0]
    if len(dataSet[0]) == 1:  # all features have been used
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplitByGini(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del labels[bestFeat]
    featValues = [example[bestFeat] for example in dataSet]
    uniqueValues = set(featValues)
    for value in uniqueValues:
        subLabels = labels[:]
        myTree[bestFeatLabel][value] = \
            createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree
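# A minimal usage sketch (comment only; the expected tree assumes the corrected
# class-label impurity above):
#     myDat, myLabels = createDataSet()
#     myTree = createTree(myDat, myLabels[:])   # pass a copy: createTree deletes used labels
#     # -> {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}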
#---------------------------------------------
# Classify with the decision tree
def classify(inputTree, featLabels, testVect):
firstStr = inputTree.keys()[0]
secondDict = inputTree[firstStr]
featIndex = featLabels.index(firstStr)
for key in secondDict.keys():
if testVect[featIndex] == key:
if type(secondDict[key]).__name__ == 'dict':
classLabel = classify(secondDict[key], featLabels, testVect)
else:
classLabel = secondDict[key]
return classLabel
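# Example (comment only): with the tree and labels from the sketch above,
#     classify(myTree, ['no surfacing', 'flippers'], [1, 0])   # -> 'no'
#     classify(myTree, ['no surfacing', 'flippers'], [1, 1])   # -> 'yes'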
#---------------------------------------------
# Persist the decision tree with the pickle module
def storeTree(inputTree, filename):
    with open(filename, 'wb') as fw:
        pickle.dump(inputTree, fw)
def loadTree(filename):
    with open(filename, 'rb') as fr:
        return pickle.load(fr)
# -------------------------------------------
# Decision tree via scikit-learn
def decisionTreeBySklearn(dataSet):
    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(dataSet[:, :2], dataSet[:, -1])
    print clf.predict([[0.1, 1.1]])
    print clf.score(dataSet[:, :2], dataSet[:, -1])
    print clf.predict_proba([[0.1, 1.1], [1.1, 1.2]])
|
gpl-2.0
|
nvoron23/scikit-learn
|
examples/cluster/plot_adjusted_for_chance_measures.py
|
286
|
4353
|
"""
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labelings increases significantly as the number of clusters
gets closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variation
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
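# Illustrative check (comment only): because ARI is adjusted for chance, e.g.
#     uniform_labelings_scores(metrics.adjusted_rand_score, 100, [10]).mean()
# stays close to 0, while an unadjusted score such as v_measure_score does not.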
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
|
bsd-3-clause
|
eleftherioszisis/NeuroM
|
neurom/view/view.py
|
1
|
15032
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''visualize morphologies'''
from matplotlib.collections import LineCollection, PatchCollection
from matplotlib.lines import Line2D
from matplotlib.patches import Circle, FancyArrowPatch, Polygon
from mpl_toolkits.mplot3d.art3d import \
Line3DCollection # pylint: disable=relative-import
import numpy as np
from neurom import NeuriteType, geom
from neurom._compat import zip
from neurom.core import iter_neurites, iter_segments
from neurom.core._soma import SomaCylinders
from neurom.core.dataformat import COLS
from neurom.core.types import tree_type_checker
from neurom.morphmath import segment_radius
from neurom.view.dendrogram import Dendrogram, layout_dendrogram, get_size, move_positions
from . import common
_LINEWIDTH = 1.2
_ALPHA = 0.8
_DIAMETER_SCALE = 1.0
TREE_COLOR = {NeuriteType.basal_dendrite: 'red',
NeuriteType.apical_dendrite: 'purple',
NeuriteType.axon: 'blue',
NeuriteType.soma: 'black',
NeuriteType.undefined: 'green'}
def _plane2col(plane):
'''take a string like 'xy', and return the indices from COLS.*'''
planes = ('xy', 'yx', 'xz', 'zx', 'yz', 'zy')
assert plane in planes, 'No such plane found! Please select one of: ' + str(planes)
return (getattr(COLS, plane[0].capitalize()),
getattr(COLS, plane[1].capitalize()), )
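# e.g. _plane2col('xz') returns (COLS.X, COLS.Z)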
def _get_linewidth(tree, linewidth, diameter_scale):
'''calculate the desired linewidth based on tree contents
If diameter_scale exists, it is used to scale the diameter of each of the segments
in the tree
If diameter_scale is None, the linewidth is used.
'''
if diameter_scale is not None and tree:
linewidth = [2 * segment_radius(s) * diameter_scale
for s in iter_segments(tree)]
return linewidth
def _get_color(treecolor, tree_type):
"""if treecolor set, it's returned, otherwise tree_type is used to return set colors"""
if treecolor is not None:
return treecolor
return TREE_COLOR.get(tree_type, 'green')
def plot_tree(ax, tree, plane='xy',
diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
color=None, alpha=_ALPHA):
'''Plots a 2d figure of the tree's segments
Args:
ax(matplotlib axes): on what to plot
tree(neurom.core.Tree or neurom.core.Neurite): plotted tree
plane(str): Any pair of 'xyz'
diameter_scale(float): Scale factor multiplied with segment diameters before plotting
linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values
Note:
If the tree contains one single point the plot will be empty
since no segments can be constructed.
'''
plane0, plane1 = _plane2col(plane)
segs = [((s[0][plane0], s[0][plane1]),
(s[1][plane0], s[1][plane1]))
for s in iter_segments(tree)]
linewidth = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth)
color = _get_color(color, tree.type)
collection = LineCollection(segs, color=color, linewidth=linewidth, alpha=alpha)
ax.add_collection(collection)
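# A hedged usage sketch (comment only; the loader call and file name are assumptions):
#     import matplotlib.pyplot as plt
#     import neurom
#     nrn = neurom.load_neuron('some_morphology.swc')
#     fig, ax = plt.subplots()
#     plot_tree(ax, nrn.neurites[0], plane='xz')
#     plt.show()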
def plot_soma(ax, soma, plane='xy',
soma_outline=True,
linewidth=_LINEWIDTH,
color=None, alpha=_ALPHA):
'''Generates a 2d figure of the soma.
Args:
ax(matplotlib axes): on what to plot
soma(neurom.core.Soma): plotted soma
plane(str): Any pair of 'xyz'
        soma_outline(bool): should the soma be drawn as an outline
        linewidth(float): width used to draw the soma outline
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values
'''
plane0, plane1 = _plane2col(plane)
color = _get_color(color, tree_type=NeuriteType.soma)
if isinstance(soma, SomaCylinders):
plane0, plane1 = _plane2col(plane)
for start, end in zip(soma.points, soma.points[1:]):
common.project_cylinder_onto_2d(ax, (plane0, plane1),
start=start[COLS.XYZ], end=end[COLS.XYZ],
start_radius=start[COLS.R], end_radius=end[COLS.R],
color=color, alpha=alpha)
else:
if soma_outline:
ax.add_artist(Circle(soma.center[[plane0, plane1]], soma.radius,
color=color, alpha=alpha))
else:
plane0, plane1 = _plane2col(plane)
points = [(p[plane0], p[plane1]) for p in soma.iter()]
if points:
points.append(points[0]) # close the loop
ax.plot(points, color=color, alpha=alpha, linewidth=linewidth)
ax.set_xlabel(plane[0])
ax.set_ylabel(plane[1])
bounding_box = geom.bounding_box(soma)
ax.dataLim.update_from_data_xy(np.vstack(([bounding_box[0][plane0], bounding_box[0][plane1]],
[bounding_box[1][plane0], bounding_box[1][plane1]])),
ignore=False)
# pylint: disable=too-many-arguments
def plot_neuron(ax, nrn,
neurite_type=NeuriteType.all,
plane='xy',
soma_outline=True,
diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
color=None, alpha=_ALPHA):
'''Plots a 2D figure of the neuron, that contains a soma and the neurites
Args:
ax(matplotlib axes): on what to plot
neurite_type(NeuriteType): an optional filter on the neurite type
nrn(neuron): neuron to be plotted
soma_outline(bool): should the soma be drawn as an outline
plane(str): Any pair of 'xyz'
diameter_scale(float): Scale factor multiplied with segment diameters before plotting
linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values
'''
plot_soma(ax, nrn.soma, plane=plane, soma_outline=soma_outline, linewidth=linewidth,
color=color, alpha=alpha)
for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)):
plot_tree(ax, neurite, plane=plane,
diameter_scale=diameter_scale, linewidth=linewidth,
color=color, alpha=alpha)
ax.set_title(nrn.name)
ax.set_xlabel(plane[0])
ax.set_ylabel(plane[1])
def _update_3d_datalim(ax, obj):
'''unlike w/ 2d Axes, the dataLim isn't set by collections, so it has to be updated manually'''
min_bounding_box, max_bounding_box = geom.bounding_box(obj)
xy_bounds = np.vstack((min_bounding_box[:COLS.Z],
max_bounding_box[:COLS.Z]))
ax.xy_dataLim.update_from_data_xy(xy_bounds, ignore=False)
z_bounds = np.vstack(((min_bounding_box[COLS.Z], min_bounding_box[COLS.Z]),
(max_bounding_box[COLS.Z], max_bounding_box[COLS.Z])))
ax.zz_dataLim.update_from_data_xy(z_bounds, ignore=False)
def plot_tree3d(ax, tree,
diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
color=None, alpha=_ALPHA):
'''Generates a figure of the tree in 3d.
If the tree contains one single point the plot will be empty \
since no segments can be constructed.
Args:
ax(matplotlib axes): on what to plot
tree(neurom.core.Tree or neurom.core.Neurite): plotted tree
diameter_scale(float): Scale factor multiplied with segment diameters before plotting
linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values
'''
segs = [(s[0][COLS.XYZ], s[1][COLS.XYZ]) for s in iter_segments(tree)]
linewidth = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth)
color = _get_color(color, tree.type)
collection = Line3DCollection(segs, color=color, linewidth=linewidth, alpha=alpha)
ax.add_collection3d(collection)
_update_3d_datalim(ax, tree)
def plot_soma3d(ax, soma, color=None, alpha=_ALPHA):
'''Generates a 3d figure of the soma.
Args:
ax(matplotlib axes): on what to plot
soma(neurom.core.Soma): plotted soma
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values
'''
color = _get_color(color, tree_type=NeuriteType.soma)
if isinstance(soma, SomaCylinders):
for start, end in zip(soma.points, soma.points[1:]):
common.plot_cylinder(ax,
start=start[COLS.XYZ], end=end[COLS.XYZ],
start_radius=start[COLS.R], end_radius=end[COLS.R],
color=color, alpha=alpha)
else:
common.plot_sphere(ax, center=soma.center[COLS.XYZ], radius=soma.radius,
color=color, alpha=alpha)
# unlike w/ 2d Axes, the dataLim isn't set by collections, so it has to be updated manually
_update_3d_datalim(ax, soma)
def plot_neuron3d(ax, nrn, neurite_type=NeuriteType.all,
diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
color=None, alpha=_ALPHA):
'''
Generates a figure of the neuron,
that contains a soma and a list of trees.
Args:
ax(matplotlib axes): on what to plot
nrn(neuron): neuron to be plotted
neurite_type(NeuriteType): an optional filter on the neurite type
diameter_scale(float): Scale factor multiplied with segment diameters before plotting
linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values
'''
plot_soma3d(ax, nrn.soma, color=color, alpha=alpha)
for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)):
plot_tree3d(ax, neurite,
diameter_scale=diameter_scale, linewidth=linewidth,
color=color, alpha=alpha)
ax.set_title(nrn.name)
def _get_dendrogram_legend(dendrogram):
'''Generates labels legend for dendrogram.
Because dendrogram is rendered as patches, we need to manually label it.
Args:
dendrogram (Dendrogram): dendrogram
Returns:
List of legend handles.
'''
def neurite_legend(neurite_type):
return Line2D([0], [0], color=TREE_COLOR[neurite_type], lw=2, label=neurite_type.name)
if dendrogram.neurite_type == NeuriteType.soma:
handles = {d.neurite_type: neurite_legend(d.neurite_type)
for d in [dendrogram] + dendrogram.children}
return handles.values()
return [neurite_legend(dendrogram.neurite_type)]
def _as_dendrogram_polygon(coords, color):
return Polygon(coords, color=color, fill=True)
def _as_dendrogram_line(start, end, color):
return FancyArrowPatch(start, end, arrowstyle='-', color=color, lw=2, shrinkA=0, shrinkB=0)
def _get_dendrogram_shapes(dendrogram, positions, show_diameter):
'''Generates drawable patches for dendrogram.
Args:
dendrogram (Dendrogram): dendrogram
positions (dict of Dendrogram: np.array): positions xy coordinates of dendrograms
show_diameter (bool): whether to draw shapes with diameter or as plain lines
Returns:
List of matplotlib.patches.
'''
color = TREE_COLOR[dendrogram.neurite_type]
start_point = positions[dendrogram]
end_point = start_point + [0, dendrogram.height]
if show_diameter:
shapes = [_as_dendrogram_polygon(dendrogram.coords + start_point, color)]
else:
shapes = [_as_dendrogram_line(start_point, end_point, color)]
for child in dendrogram.children:
shapes.append(_as_dendrogram_line(end_point, positions[child], color))
shapes += _get_dendrogram_shapes(child, positions, show_diameter)
return shapes
def plot_dendrogram(ax, obj, show_diameters=True):
'''Plots Dendrogram of `obj`.
Args:
ax: matplotlib axes
obj (neurom.Neuron, neurom.Tree): neuron or tree
show_diameters (bool): whether to show node diameters or not
'''
dendrogram = Dendrogram(obj)
positions = layout_dendrogram(dendrogram, np.array([0, 0]))
w, h = get_size(positions)
positions = move_positions(positions, np.array([.5 * w, 0]))
ax.set_xlim([-.05 * w, 1.05 * w])
ax.set_ylim([-.05 * h, 1.05 * h])
ax.set_title('Morphology Dendrogram')
ax.set_xlabel('micrometers (um)')
ax.set_ylabel('micrometers (um)')
shapes = _get_dendrogram_shapes(dendrogram, positions, show_diameters)
ax.add_collection(PatchCollection(shapes, match_original=True))
ax.set_aspect('auto')
ax.legend(handles=_get_dendrogram_legend(dendrogram))
|
bsd-3-clause
|
playerkk/face-py-faster-rcnn
|
tools/train_svms.py
|
16
|
13480
|
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
        # Initialize SVMs in a smart way. We do not do this, because it is such
        # a good initialization that we might not learn something close to
        # the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
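        # Bookkeeping: tot_loss approximates the SVM objective, i.e. the L2 penalty on
        # (w, b) plus the C-weighted hinge losses, with positives up-weighted by
        # pos_weight; it is tracked across retrainings via loss_history below.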
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
# Sanity check
scores_ret = (
X * 1.0 / self.feature_scale).dot(w.T * self.feature_scale) + b
assert np.allclose(scores, scores_ret[:, 0], atol=1e-5), \
"Scores from returned model don't match decision function"
return ((w * self.feature_scale, b), pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
|
mit
|
lbishal/scikit-learn
|
sklearn/metrics/tests/test_pairwise.py
|
17
|
25508
|
import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow a callable metric where metric(x, x) != 0.
    # Knowing that the callable is a strict metric would allow the diagonal to
    # be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
            # gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
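# chi2_kernel(x, y) = exp(-gamma * sum_i (x_i - y_i)**2 / (x_i + y_i)), while
# additive_chi2_kernel returns the (negative) chi-squared sum itself; the loop
# below re-derives both quantities from that definition.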
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
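# (K.flat[::6] walks the diagonal of the 5x5 kernel matrix: flat stride n + 1 = 6)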
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of an RBF kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine kernel is equal to a linear kernel when the data
# has been previously normalized by the L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyway.
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
|
bsd-3-clause
|
stylianos-kampakis/scikit-learn
|
examples/ensemble/plot_gradient_boosting_oob.py
|
230
|
4762
|
"""
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate the best n_estimators using cross-validation
cv_score = cv_estimate(3)
# Compute the test deviance for each boosting iteration
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
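# oob_improvement_[i] is the OOB estimate of the loss reduction at iteration i, so the
# negative cumulative sum gives an (unnormalized) OOB loss curve that can be compared
# with the test and CV curves computed above.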
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
|
bsd-3-clause
|
JetBrains/intellij-community
|
python/helpers/pydev/_pydevd_bundle/pydevd_utils.py
|
3
|
21038
|
from __future__ import nested_scopes
import os
import traceback
import pydevd_file_utils
try:
from urllib import quote
except:
from urllib.parse import quote # @UnresolvedImport
try:
from collections import OrderedDict
except:
OrderedDict = dict
import inspect
from _pydevd_bundle.pydevd_constants import BUILTINS_MODULE_NAME, IS_PY38_OR_GREATER, dict_iter_items, get_global_debugger, IS_PY3K, LOAD_VALUES_POLICY, \
ValuesPolicy
import sys
from _pydev_bundle import pydev_log
from _pydev_imps._pydev_saved_modules import threading
def _normpath(filename):
return pydevd_file_utils.get_abs_path_real_path_and_base_from_file(filename)[0]
def save_main_module(file, module_name):
# patch provided by: Scott Schlesier - when script is run, it does not
# use globals from pydevd:
# This will prevent the pydevd script from contaminating the namespace for the script to be debugged
# pretend pydevd is not the main module, and
# convince the file to be debugged that it was loaded as main
sys.modules[module_name] = sys.modules['__main__']
sys.modules[module_name].__name__ = module_name
try:
from importlib.machinery import ModuleSpec
from importlib.util import module_from_spec
m = module_from_spec(ModuleSpec('__main__', loader=None))
except:
# A fallback for Python <= 3.4
from imp import new_module
m = new_module('__main__')
sys.modules['__main__'] = m
if hasattr(sys.modules[module_name], '__loader__'):
m.__loader__ = getattr(sys.modules[module_name], '__loader__')
m.__file__ = file
return m
def to_number(x):
if is_string(x):
try:
n = float(x)
return n
except ValueError:
pass
l = x.find('(')
if l != -1:
y = x[0:l - 1]
# print y
try:
n = float(y)
return n
except ValueError:
pass
return None
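# Illustrative behaviour (sketch): to_number('2.5') -> 2.5, to_number('10 (0x0A)') -> 10.0
# (only the text before ' (' is parsed), to_number('foo') -> None.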
def compare_object_attrs_key(x):
if '__len__' == x:
# __len__ should appear after other attributes in a list.
num = 99999999
else:
num = to_number(x)
if num is not None:
return 1, num
else:
return -1, to_string(x)
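# Used as a sort key when listing object attributes: non-numeric names sort first
# (alphabetically), numeric names follow in numeric order, and '__len__' is pushed
# to the very end.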
if IS_PY3K:
def is_string(x):
return isinstance(x, str)
else:
def is_string(x):
return isinstance(x, basestring)
def to_string(x):
if is_string(x):
return x
else:
return str(x)
def print_exc():
if traceback:
traceback.print_exc()
if IS_PY3K:
def quote_smart(s, safe='/'):
return quote(s, safe)
else:
def quote_smart(s, safe='/'):
if isinstance(s, unicode):
s = s.encode('utf-8')
return quote(s, safe)
def get_clsname_for_code(code, frame):
clsname = None
if len(code.co_varnames) > 0:
# We are checking the first argument of the function
# (`self` or `cls` for methods).
first_arg_name = code.co_varnames[0]
if first_arg_name in frame.f_locals:
first_arg_obj = frame.f_locals[first_arg_name]
if inspect.isclass(first_arg_obj): # class method
first_arg_class = first_arg_obj
else: # instance method
first_arg_class = first_arg_obj.__class__
func_name = code.co_name
if hasattr(first_arg_class, func_name):
method = getattr(first_arg_class, func_name)
func_code = None
if hasattr(method, 'func_code'): # Python2
func_code = method.func_code
elif hasattr(method, '__code__'): # Python3
func_code = method.__code__
if func_code and func_code == code:
clsname = first_arg_class.__name__
return clsname
_PROJECT_ROOTS_CACHE = []
_LIBRARY_ROOTS_CACHE = []
_FILENAME_TO_IN_SCOPE_CACHE = {}
def _convert_to_str_and_clear_empty(roots):
if sys.version_info[0] <= 2:
# In py2 we need bytes for the files.
roots = [
root if not isinstance(root, unicode) else root.encode(sys.getfilesystemencoding())
for root in roots
]
new_roots = []
for root in roots:
assert isinstance(root, str), '%s not str (found: %s)' % (root, type(root))
if root:
new_roots.append(root)
return new_roots
def _clear_caches_related_to_scope_changes():
# Clear related caches.
_FILENAME_TO_IN_SCOPE_CACHE.clear()
debugger = get_global_debugger()
if debugger is not None:
debugger.clear_skip_caches()
def _set_roots(roots, cache):
roots = _convert_to_str_and_clear_empty(roots)
new_roots = []
for root in roots:
new_roots.append(_normpath(root))
cache.append(new_roots)
# Leave only the last one added.
del cache[:-1]
_clear_caches_related_to_scope_changes()
return new_roots
def _get_roots(cache, env_var, set_when_not_cached, get_default_val=None):
if not cache:
roots = os.getenv(env_var, None)
if roots is not None:
roots = roots.split(os.pathsep)
else:
if not get_default_val:
roots = []
else:
roots = get_default_val()
if not roots:
pydev_log.warn('%s being set to empty list.' % (env_var,))
set_when_not_cached(roots)
return cache[-1] # returns the roots with case normalized
def _get_default_library_roots():
# Provide sensible defaults if not in env vars.
import site
roots = [sys.prefix]
if hasattr(sys, 'base_prefix'):
roots.append(sys.base_prefix)
if hasattr(sys, 'real_prefix'):
roots.append(sys.real_prefix)
if hasattr(site, 'getusersitepackages'):
site_paths = site.getusersitepackages()
if isinstance(site_paths, (list, tuple)):
for site_path in site_paths:
roots.append(site_path)
else:
roots.append(site_paths)
if hasattr(site, 'getsitepackages'):
site_paths = site.getsitepackages()
if isinstance(site_paths, (list, tuple)):
for site_path in site_paths:
roots.append(site_path)
else:
roots.append(site_paths)
for path in sys.path:
if os.path.exists(path) and os.path.basename(path) == 'site-packages':
roots.append(path)
return sorted(set(roots))
# --- Project roots
def set_project_roots(project_roots):
project_roots = _set_roots(project_roots, _PROJECT_ROOTS_CACHE)
pydev_log.debug("IDE_PROJECT_ROOTS %s\n" % project_roots)
def _get_project_roots(project_roots_cache=_PROJECT_ROOTS_CACHE):
return _get_roots(project_roots_cache, 'IDE_PROJECT_ROOTS', set_project_roots)
# --- Library roots
def set_library_roots(roots):
roots = _set_roots(roots, _LIBRARY_ROOTS_CACHE)
pydev_log.debug("LIBRARY_ROOTS %s\n" % roots)
def _get_library_roots(library_roots_cache=_LIBRARY_ROOTS_CACHE):
return _get_roots(library_roots_cache, 'LIBRARY_ROOTS', set_library_roots, _get_default_library_roots)
def in_project_roots(filename, filename_to_in_scope_cache=_FILENAME_TO_IN_SCOPE_CACHE):
# Note: the filename_to_in_scope_cache is the same instance among the many calls to the method
try:
return filename_to_in_scope_cache[filename]
except:
project_roots = _get_project_roots()
original_filename = filename
if not filename.endswith('>'):
filename = _normpath(filename)
found_in_project = []
for root in project_roots:
if root and filename.startswith(root):
found_in_project.append(root)
found_in_library = []
library_roots = _get_library_roots()
for root in library_roots:
if root and filename.startswith(root):
found_in_library.append(root)
if not project_roots:
# If we have no project roots configured, consider it being in the project
# roots if it's not found in site-packages (because we have defaults for those
# and not the other way around).
if filename.endswith('>'):
in_project = False
else:
in_project = not found_in_library
else:
in_project = False
if found_in_project:
if not found_in_library:
in_project = True
else:
# Found in both, let's see which one has the bigger path matched.
if max(len(x) for x in found_in_project) > max(len(x) for x in found_in_library):
in_project = True
filename_to_in_scope_cache[original_filename] = in_project
return in_project
def is_exception_trace_in_project_scope(trace):
if trace is None:
return False
elif in_project_roots(trace.tb_frame.f_code.co_filename):
return True
else:
while trace is not None:
if not in_project_roots(trace.tb_frame.f_code.co_filename):
return False
trace = trace.tb_next
return True
def is_top_level_trace_in_project_scope(trace):
if trace is not None and trace.tb_next is not None:
return is_exception_trace_in_project_scope(trace) and not is_exception_trace_in_project_scope(trace.tb_next)
return is_exception_trace_in_project_scope(trace)
def is_test_item_or_set_up_caller(trace):
"""Check if the frame is the test item or set up caller.
A test function caller is a function that calls actual test code, which can be, for example,
a `unittest.TestCase` test method or a function that `pytest` assumes to be a test. A caller function
is the one we want to trace to catch failed test events. Tracing test functions
themselves is not possible because some exceptions can be caught in the test code, and
we are interested only in exceptions that are propagated to the test framework level.
"""
if not trace:
return False
frame = trace.tb_frame
abs_path, _, _ = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(frame)
if in_project_roots(abs_path):
# We are interested only in exceptions that made it to the test framework scope.
return False
if not trace.tb_next:
# This can happen when the exception has been raised inside a test item or set up caller.
return False
if not _is_next_stack_trace_in_project_roots(trace):
# The next stack frame must be the frame of a project scope function, otherwise we risk stopping
# at a line a few times since multiple test framework functions we are looking for may appear in the stack.
return False
# Set up and tear down methods can be checked immediately, since they are shared by both `pytest` and `unittest`.
unittest_set_up_and_tear_down_methods = ('_callSetUp', '_callTearDown')
if frame.f_code.co_name in unittest_set_up_and_tear_down_methods:
return True
# It is important to check if the tests are run with `pytest` first because it can run `unittest` code
# internally. This may lead to stopping on broken tests twice: once in the `pytest` test runner
# and a second time in the `unittest` runner.
is_pytest = False
f = frame
while f:
# noinspection SpellCheckingInspection
if f.f_code.co_name == 'pytest_cmdline_main':
is_pytest = True
f = f.f_back
unittest_caller_names = ['_callTestMethod', 'runTest', 'run']
if IS_PY3K:
unittest_caller_names.append('subTest')
if is_pytest:
# noinspection SpellCheckingInspection
if frame.f_code.co_name in ('pytest_pyfunc_call', 'call_fixture_func', '_eval_scope_callable', '_teardown_yield_fixture'):
return True
else:
return frame.f_code.co_name in unittest_caller_names
else:
import unittest
test_case_obj = frame.f_locals.get('self')
# Checking for `_FailedTest` is important to detect cases when tests cannot be run in the first place,
# e.g. there was an import error in the test module. This can happen both in Python 3.8 and earlier versions.
if isinstance(test_case_obj, getattr(getattr(unittest, 'loader', None), '_FailedTest', None)):
return False
if frame.f_code.co_name in unittest_caller_names:
# unittest and nose
return True
return False
def _is_next_stack_trace_in_project_roots(trace):
if trace and trace.tb_next and trace.tb_next.tb_frame:
frame = trace.tb_next.tb_frame
return in_project_roots(pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(frame)[0])
return False
# noinspection SpellCheckingInspection
def should_stop_on_failed_test(exc_info):
"""Check if the debugger should stop on failed test. Some failed tests can be marked as expected failures
and should be ignored because of that.
:param exc_info: exception type, value, and traceback
:return: `False` if test is marked as an expected failure, ``True`` otherwise.
"""
exc_type, _, trace = exc_info
# unittest
test_item = trace.tb_frame.f_locals.get('method') if IS_PY38_OR_GREATER else trace.tb_frame.f_locals.get('testMethod')
if test_item:
return not getattr(test_item, '__unittest_expecting_failure__', False)
# pytest
testfunction = trace.tb_frame.f_locals.get('testfunction')
if testfunction and hasattr(testfunction, 'pytestmark'):
# noinspection PyBroadException
try:
for attr in testfunction.pytestmark:
# noinspection PyUnresolvedReferences
if attr.name == 'xfail':
# noinspection PyUnresolvedReferences
exc_to_ignore = attr.kwargs.get('raises')
if not exc_to_ignore:
# All exceptions should be ignored, if no type is specified.
return False
elif hasattr(exc_to_ignore, '__iter__'):
return exc_type not in exc_to_ignore
else:
return exc_type is not exc_to_ignore
except BaseException:
pass
return True
def is_exception_in_test_unit_can_be_ignored(exception):
return exception.__name__ == 'SkipTest'
def get_top_level_trace_in_project_scope(trace):
while trace:
if is_top_level_trace_in_project_scope(trace):
break
trace = trace.tb_next
return trace
def is_filter_enabled():
return os.getenv('PYDEVD_FILTERS') is not None
def is_filter_libraries():
is_filter = os.getenv('PYDEVD_FILTER_LIBRARIES') is not None
pydev_log.debug("PYDEVD_FILTER_LIBRARIES %s\n" % is_filter)
return is_filter
def _get_stepping_filters(filters_cache=[]):
if not filters_cache:
filters = os.getenv('PYDEVD_FILTERS', '').split(';')
pydev_log.debug("PYDEVD_FILTERS %s\n" % filters)
new_filters = []
for new_filter in filters:
new_filters.append(new_filter)
filters_cache.append(new_filters)
return filters_cache[-1]
def is_ignored_by_filter(filename, filename_to_ignored_by_filters_cache={}):
try:
return filename_to_ignored_by_filters_cache[filename]
except:
import fnmatch
for stepping_filter in _get_stepping_filters():
if fnmatch.fnmatch(filename, stepping_filter):
pydev_log.debug("File %s ignored by filter %s" % (filename, stepping_filter))
filename_to_ignored_by_filters_cache[filename] = True
break
else:
filename_to_ignored_by_filters_cache[filename] = False
return filename_to_ignored_by_filters_cache[filename]
def get_non_pydevd_threads():
threads = threading.enumerate()
return [t for t in threads if t and not getattr(t, 'is_pydev_daemon_thread', False)]
def dump_threads(stream=None):
'''
Helper to dump thread info.
'''
if stream is None:
stream = sys.stderr
thread_id_to_name = {}
try:
for t in threading.enumerate():
thread_id_to_name[t.ident] = '%s (daemon: %s, pydevd thread: %s)' % (
t.name, t.daemon, getattr(t, 'is_pydev_daemon_thread', False))
except:
pass
from _pydevd_bundle.pydevd_additional_thread_info_regular import _current_frames
stream.write('===============================================================================\n')
stream.write('Threads running\n')
stream.write('================================= Thread Dump =================================\n')
stream.flush()
for thread_id, stack in _current_frames().items():
stream.write('\n-------------------------------------------------------------------------------\n')
stream.write(" Thread %s" % thread_id_to_name.get(thread_id, thread_id))
stream.write('\n\n')
for i, (filename, lineno, name, line) in enumerate(traceback.extract_stack(stack)):
stream.write(' File "%s", line %d, in %s\n' % (filename, lineno, name))
if line:
stream.write(" %s\n" % (line.strip()))
if i == 0 and 'self' in stack.f_locals:
stream.write(' self: ')
try:
stream.write(str(stack.f_locals['self']))
except:
stream.write('Unable to get str of: %s' % (type(stack.f_locals['self']),))
stream.write('\n')
stream.flush()
stream.write('\n=============================== END Thread Dump ===============================')
stream.flush()
def take_first_n_coll_elements(coll, n):
if coll.__class__ in (list, tuple):
return coll[:n]
elif coll.__class__ in (set, frozenset):
buf = []
for i, x in enumerate(coll):
if i >= n:
break
buf.append(x)
return type(coll)(buf)
elif coll.__class__ in (dict, OrderedDict):
ret = type(coll)()
for i, (k, v) in enumerate(dict_iter_items(coll)):
if i >= n:
break
ret[k] = v
return ret
else:
raise TypeError("Unsupported collection type: '%s'" % str(coll.__class__))
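# Illustrative (sketch): take_first_n_coll_elements([1, 2, 3, 4], 2) -> [1, 2]; for dicts
# the first n items in iteration order are kept and the collection type is preserved.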
class VariableWithOffset(object):
def __init__(self, data, offset):
self.data, self.offset = data, offset
def get_var_and_offset(var):
if isinstance(var, VariableWithOffset):
return var.data, var.offset
return var, 0
def is_pandas_container(type_qualifier, var_type, var):
return var_type in ("DataFrame", "Series") and type_qualifier.startswith("pandas") and hasattr(var, "shape")
def is_numpy_container(type_qualifier, var_type, var):
return var_type == "ndarray" and type_qualifier == "numpy" and hasattr(var, "shape")
def is_builtin(x):
return getattr(x, '__module__', None) == BUILTINS_MODULE_NAME
def is_numpy(x):
if not getattr(x, '__module__', None) == 'numpy':
return False
type_name = x.__name__
return type_name == 'dtype' or type_name == 'bool_' or type_name == 'str_' or 'int' in type_name or 'uint' in type_name \
or 'float' in type_name or 'complex' in type_name
def should_evaluate_full_value(val):
return LOAD_VALUES_POLICY == ValuesPolicy.SYNC \
or ((is_builtin(type(val)) or is_numpy(type(val))) and not isinstance(val, (list, tuple, dict, set, frozenset))) \
or (is_in_unittests_debugging_mode() and isinstance(val, Exception))
def should_evaluate_shape():
return LOAD_VALUES_POLICY != ValuesPolicy.ON_DEMAND
def _series_to_str(s, max_items):
res = []
s = s[:max_items]
for item in s.iteritems():
# item: (index, value)
res.append(str(item))
return ' '.join(res)
def _df_to_str(value):
# Avoid using df.iteritems() or df.values[i], because they are very slow for large data frames
# df.__str__() is already optimised and works fast enough
res = []
rows = value.split('\n')
for (i, r) in enumerate(rows):
if i == 0:
res.append(r.strip())
else:
res.append("[%s]" % r)
return ' '.join(res)
def pandas_to_str(df, type_name, str_value, max_items):
try:
if type_name == "Series":
return _series_to_str(df, max_items)
elif type_name == "DataFrame":
return _df_to_str(str_value)
else:
return str(df)
except Exception as e:
pydev_log.warn("Failed to format pandas variable: " + str(e))
return str(df)
def format_numpy_array(num_array, max_items):
return str(num_array[:max_items]).replace('\n', ',').strip()
def is_in_unittests_debugging_mode():
debugger = get_global_debugger()
if debugger:
return debugger.stop_on_failed_tests
|
apache-2.0
|
ryfeus/lambda-packs
|
LightGBM_sklearn_scipy_numpy/source/sklearn/neighbors/classification.py
|
15
|
14338
|
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
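# _get_weights returns None for uniform weighting; fall back to a matrix of ones so
# that every neighbor casts an equal vote.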
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors within the given radius).
If set to None, a ValueError is raised when an outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.zeros(len(neigh_ind), dtype=object)
pred_labels[:] = [_y[ind, k] for ind in neigh_ind]
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights[inliers])],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
|
mit
|
ayush29feb/cs231n
|
assignment1/cs231n/features.py
|
30
|
4807
|
import matplotlib
import numpy as np
from scipy.ndimage import uniform_filter
def extract_features(imgs, feature_fns, verbose=False):
"""
Given pixel data for images and several feature functions that can operate on
single images, apply all feature functions to all images, concatenating the
feature vectors for each image and storing the features for all images in
a single matrix.
Inputs:
- imgs: N x H X W X C array of pixel data for N images.
- feature_fns: List of k feature functions. The ith feature function should
take as input an H x W x D array and return a (one-dimensional) array of
length F_i.
- verbose: Boolean; if true, print progress.
Returns:
An array of shape (N, F_1 + ... + F_k) where each column is the concatenation
of all features for a single image.
"""
num_images = imgs.shape[0]
if num_images == 0:
return np.array([])
# Use the first image to determine feature dimensions
feature_dims = []
first_image_features = []
for feature_fn in feature_fns:
feats = feature_fn(imgs[0].squeeze())
assert len(feats.shape) == 1, 'Feature functions must be one-dimensional'
feature_dims.append(feats.size)
first_image_features.append(feats)
# Now that we know the dimensions of the features, we can allocate a single
# big array to store all features as columns.
total_feature_dim = sum(feature_dims)
imgs_features = np.zeros((num_images, total_feature_dim))
imgs_features[0] = np.hstack(first_image_features).T
# Extract features for the rest of the images.
for i in xrange(1, num_images):
idx = 0
for feature_fn, feature_dim in zip(feature_fns, feature_dims):
next_idx = idx + feature_dim
imgs_features[i, idx:next_idx] = feature_fn(imgs[i].squeeze())
idx = next_idx
if verbose and i % 1000 == 0:
print 'Done extracting features for %d / %d images' % (i, num_images)
return imgs_features
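# Typical usage (sketch, using the feature functions defined below in this module):
#   feats = extract_features(imgs, [hog_feature,
#                                   lambda im: color_histogram_hsv(im, nbin=10)])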
def rgb2gray(rgb):
"""Convert RGB image to grayscale
Parameters:
rgb : RGB image
Returns:
gray : grayscale image
"""
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])  # ITU-R 601 luma weights
def hog_feature(im):
"""Compute Histogram of Gradient (HOG) feature for an image
Modified from skimage.feature.hog
http://pydoc.net/Python/scikits-image/0.4.2/skimage.feature.hog
Reference:
Histograms of Oriented Gradients for Human Detection
Navneet Dalal and Bill Triggs, CVPR 2005
Parameters:
im : an input grayscale or rgb image
Returns:
feat: Histogram of Gradient (HOG) feature
"""
# convert rgb to grayscale if needed
if im.ndim == 3:
image = rgb2gray(im)
else:
image = np.atleast_2d(im)
sx, sy = image.shape # image size
orientations = 9 # number of gradient bins
cx, cy = (8, 8) # pixels per cell
gx = np.zeros(image.shape)
gy = np.zeros(image.shape)
gx[:, :-1] = np.diff(image, n=1, axis=1) # compute gradient on x-direction
gy[:-1, :] = np.diff(image, n=1, axis=0) # compute gradient on y-direction
grad_mag = np.sqrt(gx ** 2 + gy ** 2) # gradient magnitude
grad_ori = np.arctan2(gy, (gx + 1e-15)) * (180 / np.pi) + 90 # gradient orientation
n_cellsx = int(np.floor(sx / cx)) # number of cells in x
n_cellsy = int(np.floor(sy / cy)) # number of cells in y
# compute orientations integral images
orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))
for i in range(orientations):
# create new integral image for this orientation
# isolate orientations in this range
temp_ori = np.where(grad_ori < 180 / orientations * (i + 1),
grad_ori, 0)
temp_ori = np.where(grad_ori >= 180 / orientations * i,
temp_ori, 0)
# select magnitudes for those orientations
cond2 = temp_ori > 0
temp_mag = np.where(cond2, grad_mag, 0)
orientation_histogram[:,:,i] = uniform_filter(temp_mag, size=(cx, cy))[cx/2::cx, cy/2::cy].T
return orientation_histogram.ravel()
def color_histogram_hsv(im, nbin=10, xmin=0, xmax=255, normalized=True):
"""
Compute color histogram for an image using hue.
Inputs:
- im: H x W x C array of pixel data for an RGB image.
- nbin: Number of histogram bins. (default: 10)
- xmin: Minimum pixel value (default: 0)
- xmax: Maximum pixel value (default: 255)
- normalized: Whether to normalize the histogram (default: True)
Returns:
1D vector of length nbin giving the color histogram over the hue of the
input image.
"""
ndim = im.ndim
bins = np.linspace(xmin, xmax, nbin+1)
hsv = matplotlib.colors.rgb_to_hsv(im/xmax) * xmax
imhist, bin_edges = np.histogram(hsv[:,:,0], bins=bins, density=normalized)
imhist = imhist * np.diff(bin_edges)
# return histogram
return imhist
|
mit
|
cjayb/mne-python
|
mne/channels/layout.py
|
1
|
36179
|
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Marijn van Vliet <[email protected]>
# Jona Sassenhagen <[email protected]>
# Teon Brooks <[email protected]>
# Robert Luke <[email protected]>
#
# License: Simplified BSD
import logging
from collections import defaultdict
from itertools import combinations
import os.path as op
import numpy as np
from ..transforms import _pol_to_cart, _cart_to_sph
from ..io.pick import pick_types, _picks_to_idx
from ..io.constants import FIFF
from ..io.meas_info import Info
from ..utils import (_clean_names, warn, _check_ch_locs, fill_doc,
_check_option, _check_sphere)
from .channels import _get_ch_info
class Layout(object):
"""Sensor layouts.
Layouts are typically loaded from a file using read_layout. Only use this
class directly if you're constructing a new layout.
Parameters
----------
box : tuple of length 4
The box dimension (x_min, x_max, y_min, y_max).
pos : array, shape=(n_channels, 4)
The positions of the channels in 2d (x, y, width, height).
names : list
The channel names.
ids : list
The channel ids.
kind : str
The type of Layout (e.g. 'Vectorview-all').
"""
def __init__(self, box, pos, names, ids, kind): # noqa: D102
self.box = box
self.pos = pos
self.names = names
self.ids = ids
self.kind = kind
def save(self, fname):
"""Save Layout to disk.
Parameters
----------
fname : str
The file name (e.g. 'my_layout.lout').
See Also
--------
read_layout
"""
x = self.pos[:, 0]
y = self.pos[:, 1]
width = self.pos[:, 2]
height = self.pos[:, 3]
if fname.endswith('.lout'):
out_str = '%8.2f %8.2f %8.2f %8.2f\n' % self.box
elif fname.endswith('.lay'):
out_str = ''
else:
raise ValueError('Unknown layout type. Should be of type '
'.lout or .lay.')
for ii in range(x.shape[0]):
out_str += ('%03d %8.2f %8.2f %8.2f %8.2f %s\n'
% (self.ids[ii], x[ii], y[ii],
width[ii], height[ii], self.names[ii]))
f = open(fname, 'w')
f.write(out_str)
f.close()
def __repr__(self):
"""Return the string representation."""
return '<Layout | %s - Channels: %s ...>' % (self.kind,
', '.join(self.names[:3]))
@fill_doc
def plot(self, picks=None, show=True):
"""Plot the sensor positions.
Parameters
----------
%(picks_nostr)s
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the sensor topography.
Notes
-----
.. versionadded:: 0.12.0
"""
from ..viz.topomap import plot_layout
return plot_layout(self, picks=picks, show=show)
def _read_lout(fname):
"""Aux function."""
with open(fname) as f:
box_line = f.readline() # first line contains box dimension
box = tuple(map(float, box_line.split()))
names, pos, ids = [], [], []
for line in f:
splits = line.split()
if len(splits) == 7:
cid, x, y, dx, dy, chkind, nb = splits
name = chkind + ' ' + nb
else:
cid, x, y, dx, dy, name = splits
pos.append(np.array([x, y, dx, dy], dtype=np.float64))
names.append(name)
ids.append(int(cid))
pos = np.array(pos)
return box, pos, names, ids
def _read_lay(fname):
"""Aux function."""
with open(fname) as f:
box = None
names, pos, ids = [], [], []
for line in f:
splits = line.split()
if len(splits) == 7:
cid, x, y, dx, dy, chkind, nb = splits
name = chkind + ' ' + nb
else:
cid, x, y, dx, dy, name = splits
pos.append(np.array([x, y, dx, dy], dtype=np.float64))
names.append(name)
ids.append(int(cid))
pos = np.array(pos)
return box, pos, names, ids
def read_layout(kind, path=None, scale=True):
"""Read layout from a file.
Parameters
----------
kind : str
The name of the .lout file (e.g. kind='Vectorview-all' for
'Vectorview-all.lout').
path : str | None
The path of the folder containing the Layout file. Defaults to the
mne/channels/data/layouts folder inside your mne-python installation.
scale : bool
Apply useful scaling for out-of-the-box plotting using layout.pos.
Defaults to True.
Returns
-------
layout : instance of Layout
The layout.
See Also
--------
Layout.save
"""
if path is None:
path = op.join(op.dirname(__file__), 'data', 'layouts')
if not kind.endswith('.lout') and op.exists(op.join(path, kind + '.lout')):
kind += '.lout'
elif not kind.endswith('.lay') and op.exists(op.join(path, kind + '.lay')):
kind += '.lay'
if kind.endswith('.lout'):
fname = op.join(path, kind)
kind = kind[:-5]
box, pos, names, ids = _read_lout(fname)
elif kind.endswith('.lay'):
fname = op.join(path, kind)
kind = kind[:-4]
box, pos, names, ids = _read_lay(fname)
else:
raise ValueError('Unknown layout type. Should be of type '
'.lout or .lay.')
if scale:
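# Shift/scale positions into roughly [0, 1] and add a small margin so that
# sensor boxes do not touch the figure edge.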
pos[:, 0] -= np.min(pos[:, 0])
pos[:, 1] -= np.min(pos[:, 1])
scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
pos /= scaling
pos[:, :2] += 0.03
pos[:, :2] *= 0.97 / 1.03
pos[:, 2:] *= 0.94
return Layout(box=box, pos=pos, names=names, kind=kind, ids=ids)
def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads',
csd=False):
"""Create .lout file from EEG electrode digitization.
Parameters
----------
info : instance of Info
Measurement info (e.g., raw.info).
radius : float
Viewport radius as a fraction of main figure height. Defaults to 0.5.
width : float | None
Width of sensor axes as a fraction of main figure height. By default,
this will be the maximum width possible without axes overlapping.
height : float | None
Height of sensor axes as a fraction of main figure height. By default,
this will be the maximum height possible without axes overlapping.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any.
If 'bads', exclude channels in info['bads'] (default).
csd : bool
Whether the channels contain current-source-density-transformed data.
Returns
-------
layout : Layout
The generated Layout.
See Also
--------
make_grid_layout, generate_2d_layout
"""
if not (0 <= radius <= 0.5):
raise ValueError('The radius parameter should be between 0 and 0.5.')
if width is not None and not (0 <= width <= 1.0):
raise ValueError('The width parameter should be between 0 and 1.')
if height is not None and not (0 <= height <= 1.0):
raise ValueError('The height parameter should be between 0 and 1.')
pick_kwargs = dict(meg=False, eeg=True, ref_meg=False, exclude=exclude)
if csd:
pick_kwargs.update(csd=True, eeg=False)
picks = pick_types(info, **pick_kwargs)
loc2d = _find_topomap_coords(info, picks)
names = [info['chs'][i]['ch_name'] for i in picks]
# Scale [x, y] to be in the range [-0.5, 0.5]
# Don't mess with the origin or aspect ratio
scale = np.maximum(-np.min(loc2d, axis=0), np.max(loc2d, axis=0)).max() * 2
loc2d /= scale
# If no width or height specified, calculate the maximum value possible
# without axes overlapping.
if width is None or height is None:
width, height = _box_size(loc2d, width, height, padding=0.1)
# Scale to viewport radius
loc2d *= 2 * radius
# Some subplot centers will be at the figure edge. Shrink everything so it
# fits in the figure.
scaling = min(1 / (1. + width), 1 / (1. + height))
loc2d *= scaling
width *= scaling
height *= scaling
# Shift to center
loc2d += 0.5
n_channels = loc2d.shape[0]
pos = np.c_[loc2d[:, 0] - 0.5 * width,
loc2d[:, 1] - 0.5 * height,
width * np.ones(n_channels),
height * np.ones(n_channels)]
box = (0, 1, 0, 1)
ids = 1 + np.arange(n_channels)
layout = Layout(box=box, pos=pos, names=names, kind='EEG', ids=ids)
return layout
@fill_doc
def make_grid_layout(info, picks=None, n_col=None):
"""Generate .lout file for custom data, i.e., ICA sources.
Parameters
----------
info : instance of Info | None
Measurement info (e.g., raw.info). If None, default names will be
employed.
%(picks_base)s all good misc channels.
n_col : int | None
Number of columns to generate. If None, a square grid will be produced.
Returns
-------
layout : Layout
The generated layout.
See Also
--------
make_eeg_layout, generate_2d_layout
"""
picks = _picks_to_idx(info, picks, 'misc')
names = [info['chs'][k]['ch_name'] for k in picks]
if not names:
raise ValueError('No misc data channels found.')
ids = list(range(len(picks)))
size = len(picks)
if n_col is None:
# prepare square-like layout
n_row = n_col = np.sqrt(size) # try square
if n_col % 1:
# try n * (n-1) rectangle
n_col, n_row = int(n_col + 1), int(n_row)
if n_col * n_row < size: # jump to the next full square
n_row += 1
else:
n_row = int(np.ceil(size / float(n_col)))
# setup position grid
x, y = np.meshgrid(np.linspace(-0.5, 0.5, n_col),
np.linspace(-0.5, 0.5, n_row))
x, y = x.ravel()[:size], y.ravel()[:size]
width, height = _box_size(np.c_[x, y], padding=0.1)
# Some axes will be at the figure edge. Shrink everything so it fits in the
# figure. Add 0.01 border around everything
border_x, border_y = (0.01, 0.01)
x_scaling = 1 / (1. + width + border_x)
y_scaling = 1 / (1. + height + border_y)
x = x * x_scaling
y = y * y_scaling
width *= x_scaling
height *= y_scaling
# Shift to center
x += 0.5
y += 0.5
# calculate pos
pos = np.c_[x - 0.5 * width, y - 0.5 * height,
width * np.ones(size), height * np.ones(size)]
box = (0, 1, 0, 1)
layout = Layout(box=box, pos=pos, names=names, kind='grid-misc', ids=ids)
return layout
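# Illustrative sketch of the square-grid logic above: for 10 channels,
# sqrt(10) ~= 3.16, so the grid becomes 4 columns by 3 rows (4 * 3 >= 10).
# This helper reproduces that computation standalone.
def _example_grid_shape(size):
    """Sketch: compute (n_row, n_col) the way make_grid_layout does."""
    n_row = n_col = np.sqrt(size)
    if n_col % 1:  # not a perfect square
        n_col, n_row = int(n_col + 1), int(n_row)
        if n_col * n_row < size:  # still too small, add a row
            n_row += 1
    return int(n_row), int(n_col)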
def find_layout(info, ch_type=None, exclude='bads'):
"""Choose a layout based on the channels in the info 'chs' field.
Parameters
----------
info : instance of Info
The measurement info.
ch_type : {'mag', 'grad', 'meg', 'eeg'} | None
The channel type for selecting single channel layouts.
Defaults to None. Note, this argument will only be considered for
VectorView type layout. Use ``'meg'`` to force using the full layout
in situations where the info contains only one sensor type.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any.
If 'bads', exclude channels in info['bads'] (default).
Returns
-------
layout : Layout instance | None
None if layout not found.
"""
_check_option('ch_type', ch_type, [None, 'mag', 'grad', 'meg', 'eeg',
'csd'])
(has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only,
has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info)
has_vv_meg = has_vv_mag and has_vv_grad
has_vv_only_mag = has_vv_mag and not has_vv_grad
has_vv_only_grad = has_vv_grad and not has_vv_mag
if ch_type == "meg" and not has_any_meg:
raise RuntimeError('No MEG channels present. Cannot find MEG layout.')
if ch_type == "eeg" and not has_eeg_coils:
raise RuntimeError('No EEG channels present. Cannot find EEG layout.')
layout_name = None
if ((has_vv_meg and ch_type is None) or
(any([has_vv_mag, has_vv_grad]) and ch_type == 'meg')):
layout_name = 'Vectorview-all'
elif has_vv_only_mag or (has_vv_meg and ch_type == 'mag'):
layout_name = 'Vectorview-mag'
elif has_vv_only_grad or (has_vv_meg and ch_type == 'grad'):
if info['ch_names'][0].endswith('X'):
layout_name = 'Vectorview-grad_norm'
else:
layout_name = 'Vectorview-grad'
elif has_neuromag_122_grad:
layout_name = 'Neuromag_122'
elif ((has_eeg_coils_only and ch_type in [None, 'eeg']) or
(has_eeg_coils_and_meg and ch_type == 'eeg')):
if not isinstance(info, (dict, Info)):
raise RuntimeError('Cannot make EEG layout, no measurement info '
'was passed to `find_layout`')
return make_eeg_layout(info, exclude=exclude)
elif has_csd_coils and ch_type in [None, 'csd']:
return make_eeg_layout(info, exclude=exclude, csd=True)
elif has_4D_mag:
layout_name = 'magnesWH3600'
elif has_CTF_grad:
layout_name = 'CTF-275'
elif n_kit_grads > 0:
layout_name = _find_kit_layout(info, n_kit_grads)
# If no known layout is found, fall back on automatic layout
if layout_name is None:
picks = _picks_to_idx(info, 'data', exclude=(), with_ref_meg=False)
ch_names = [info['ch_names'][pick] for pick in picks]
xy = _find_topomap_coords(info, picks=picks, ignore_overlap=True)
return generate_2d_layout(xy, ch_names=ch_names, name='custom',
normalize=True)
layout = read_layout(layout_name)
if not is_old_vv:
layout.names = _clean_names(layout.names, remove_whitespace=True)
if has_CTF_grad:
layout.names = _clean_names(layout.names, before_dash=True)
# Apply mask for excluded channels.
if exclude == 'bads':
exclude = info['bads']
idx = [ii for ii, name in enumerate(layout.names) if name not in exclude]
layout.names = [layout.names[ii] for ii in idx]
layout.pos = layout.pos[idx]
layout.ids = [layout.ids[ii] for ii in idx]
return layout
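# Illustrative sketch: let find_layout guess the layout from whatever
# channels are present. ``info`` is assumed to be a valid Info instance.
def _example_find_layout(info):
    """Sketch: auto-detect a sensor layout and report its size."""
    layout = find_layout(info, ch_type=None, exclude='bads')
    print('Detected %s layout with %d positions'
          % (layout.kind, len(layout.pos)))
    return layout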
def _find_kit_layout(info, n_grads):
"""Determine the KIT layout.
Parameters
----------
info : Info
Info object.
n_grads : int
Number of KIT-gradiometers in the info.
Returns
-------
kit_layout : str | None
String naming the detected KIT layout or ``None`` if layout is missing.
"""
if info['kit_system_id'] is not None:
# avoid circular import
from ..io.kit.constants import KIT_LAYOUT
return KIT_LAYOUT.get(info['kit_system_id'])
elif n_grads == 160:
return 'KIT-160'
elif n_grads == 125:
return 'KIT-125'
elif n_grads > 157:
return 'KIT-AD'
# channels which are on the left hemisphere for NY and right for UMD
test_chs = ('MEG 13', 'MEG 14', 'MEG 15', 'MEG 16', 'MEG 25',
'MEG 26', 'MEG 27', 'MEG 28', 'MEG 29', 'MEG 30',
'MEG 31', 'MEG 32', 'MEG 57', 'MEG 60', 'MEG 61',
'MEG 62', 'MEG 63', 'MEG 64', 'MEG 73', 'MEG 90',
'MEG 93', 'MEG 95', 'MEG 96', 'MEG 105', 'MEG 112',
'MEG 120', 'MEG 121', 'MEG 122', 'MEG 123', 'MEG 124',
'MEG 125', 'MEG 126', 'MEG 142', 'MEG 144', 'MEG 153',
'MEG 154', 'MEG 155', 'MEG 156')
x = [ch['loc'][0] < 0 for ch in info['chs'] if ch['ch_name'] in test_chs]
if np.all(x):
return 'KIT-157' # KIT-NY
elif np.all(np.invert(x)):
raise NotImplementedError("Guessing sensor layout for legacy UMD "
"files is not implemented. Please convert "
"your files using MNE-Python 0.13 or "
"higher.")
else:
raise RuntimeError("KIT system could not be determined for data")
def _box_size(points, width=None, height=None, padding=0.0):
"""Given a series of points, calculate an appropriate box size.
Parameters
----------
points : array, shape (n_points, 2)
The centers of the axes as a list of (x, y) coordinate pairs. Normally
these are points in the range [0, 1] centered at 0.5.
width : float | None
An optional box width to enforce. When set, only the box height will be
calculated by the function.
height : float | None
An optional box height to enforce. When set, only the box width will be
calculated by the function.
padding : float
Portion of the box to reserve for padding. The value can range between
0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding).
Returns
-------
width : float
Width of the box
height : float
Height of the box
"""
from scipy.spatial.distance import pdist
def xdiff(a, b):
return np.abs(a[0] - b[0])
def ydiff(a, b):
return np.abs(a[1] - b[1])
points = np.asarray(points)
all_combinations = list(combinations(points, 2))
if width is None and height is None:
if len(points) <= 1:
# Trivial case first
width = 1.0
height = 1.0
else:
# Find the closest two points A and B.
a, b = all_combinations[np.argmin(pdist(points))]
# The closest points define either the max width or max height.
w, h = xdiff(a, b), ydiff(a, b)
if w > h:
width = w
else:
height = h
# At this point, either width or height is known, or both are known.
if height is None:
# Find all axes that could potentially overlap horizontally.
hdist = pdist(points, xdiff)
candidates = [all_combinations[i] for i, d in enumerate(hdist)
if d < width]
if len(candidates) == 0:
# No axes overlap, take all the height you want.
height = 1.0
else:
# Find an appropriate height so that none of the found axes will
# overlap.
height = np.min([ydiff(*c) for c in candidates])
elif width is None:
# Find all axes that could potentially overlap vertically.
vdist = pdist(points, ydiff)
candidates = [all_combinations[i] for i, d in enumerate(vdist)
if d < height]
if len(candidates) == 0:
# No axes overlap, take all the width you want.
width = 1.0
else:
# Find an appropriate width so that none of the found axes will
# overlap.
width = np.min([xdiff(*c) for c in candidates])
# Add a bit of padding between boxes
width *= 1 - padding
height *= 1 - padding
return width, height
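# Illustrative sketch for _box_size: on a regular 2 x 2 grid with 0.5
# spacing, the largest non-overlapping box is 0.5 x 0.5, shrunk by the 10%
# padding to 0.45 x 0.45.
def _example_box_size():
    """Sketch: check _box_size on a small regular grid."""
    pts = np.array([[0., 0.], [0.5, 0.], [0., 0.5], [0.5, 0.5]])
    width, height = _box_size(pts, padding=0.1)
    assert np.isclose(width, 0.45) and np.isclose(height, 0.45)
    return width, height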
def _find_topomap_coords(info, picks, layout=None, ignore_overlap=False,
to_sphere=True, sphere=None):
"""Guess the E/MEG layout and return appropriate topomap coordinates.
Parameters
----------
info : instance of Info
Measurement info.
picks : str | list | slice | None
None will choose all channels.
layout : None | instance of Layout
Enforce using a specific layout. With None, a new map is generated
and a layout is chosen based on the channels in the picks
parameter.
sphere : array-like | str
Definition of the head sphere.
Returns
-------
coords : array, shape = (n_chs, 2)
2 dimensional coordinates for each sensor for a topomap plot.
"""
picks = _picks_to_idx(info, picks, 'all', exclude=(), allow_empty=False)
if layout is not None:
chs = [info['chs'][i] for i in picks]
pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs]
pos = np.asarray(pos)
else:
pos = _auto_topomap_coords(
info, picks, ignore_overlap=ignore_overlap, to_sphere=to_sphere,
sphere=sphere)
return pos
def _auto_topomap_coords(info, picks, ignore_overlap, to_sphere, sphere):
"""Make a 2 dimensional sensor map from sensor positions in an info dict.
The default is to use the electrode locations. The fallback option is to
attempt using digitization points of kind FIFFV_POINT_EEG. This only works
with EEG and requires an equal number of digitization points and sensors.
Parameters
----------
info : instance of Info
The measurement info.
picks : list | str | slice | None
None will pick all channels.
ignore_overlap : bool
Whether to ignore overlapping positions in the layout. If False and
positions overlap, an error is thrown.
to_sphere : bool
If True, the radial distance of spherical coordinates is ignored, in
effect fitting the xyz-coordinates to a sphere.
sphere : array-like | str
The head sphere definition.
Returns
-------
locs : array, shape = (n_sensors, 2)
An array of positions of the 2 dimensional map.
"""
from scipy.spatial.distance import pdist, squareform
sphere = _check_sphere(sphere, info)
picks = _picks_to_idx(info, picks, 'all', exclude=(), allow_empty=False)
chs = [info['chs'][i] for i in picks]
# Use channel locations if available
locs3d = np.array([ch['loc'][:3] for ch in chs])
# If electrode locations are not available, use digitization points
if not _check_ch_locs(chs):
logging.warning('Did not find any electrode locations (in the info '
'object), will attempt to use digitization points '
'instead. However, if digitization points do not '
'correspond to the EEG electrodes, this will lead to '
'bad results. Please verify that the sensor locations '
'in the plot are accurate.')
# MEG/EOG/ECG sensors don't have digitization points; all requested
# channels must be EEG
for ch in chs:
if ch['kind'] != FIFF.FIFFV_EEG_CH:
raise ValueError("Cannot determine location of MEG/EOG/ECG "
"channels using digitization points.")
eeg_ch_names = [ch['ch_name'] for ch in info['chs']
if ch['kind'] == FIFF.FIFFV_EEG_CH]
# Get EEG digitization points
if info['dig'] is None or len(info['dig']) == 0:
raise RuntimeError('No digitization points found.')
locs3d = np.array([point['r'] for point in info['dig']
if point['kind'] == FIFF.FIFFV_POINT_EEG])
if len(locs3d) == 0:
raise RuntimeError('Did not find any digitization points of '
'kind FIFFV_POINT_EEG (%d) in the info.'
% FIFF.FIFFV_POINT_EEG)
if len(locs3d) != len(eeg_ch_names):
raise ValueError("Number of EEG digitization points (%d) "
"doesn't match the number of EEG channels "
"(%d)" % (len(locs3d), len(eeg_ch_names)))
# We no longer center digitization points on the head origin, as we
# always work in head coordinates.
# Match the digitization points with the requested
# channels.
eeg_ch_locs = dict(zip(eeg_ch_names, locs3d))
locs3d = np.array([eeg_ch_locs[ch['ch_name']] for ch in chs])
# Sometimes we can get nans
locs3d[~np.isfinite(locs3d)] = 0.
# Duplicate points cause all kinds of trouble during visualization
dist = pdist(locs3d)
if len(locs3d) > 1 and np.min(dist) < 1e-10 and not ignore_overlap:
problematic_electrodes = [
chs[elec_i]['ch_name']
for elec_i in squareform(dist < 1e-10).any(axis=0).nonzero()[0]
]
raise ValueError('The following electrodes have overlapping positions,'
' which causes problems during visualization:\n' +
', '.join(problematic_electrodes))
if to_sphere:
# translate to sphere origin, transform/flatten Z, translate back
locs3d -= sphere[:3]
# use spherical (theta, pol) as (r, theta) for polar->cartesian
cart_coords = _cart_to_sph(locs3d)
out = _pol_to_cart(cart_coords[:, 1:][:, ::-1])
# scale from radians to mm
out *= cart_coords[:, [0]] / (np.pi / 2.)
out += sphere[:2]
else:
out = _pol_to_cart(_cart_to_sph(locs3d))
return out
def _topo_to_sphere(pos, eegs):
"""Transform xy-coordinates to sphere.
Parameters
----------
pos : array-like, shape (n_channels, 2)
xy-coordinates to transform.
eegs : list of int
Indices of EEG channels that are included when calculating the sphere.
Returns
-------
coords : array, shape (n_channels, 3)
xyz-coordinates.
"""
xs, ys = np.array(pos).T
sqs = np.max(np.sqrt((xs[eegs] ** 2) + (ys[eegs] ** 2)))
xs /= sqs # Shape to a sphere and normalize
ys /= sqs
xs += 0.5 - np.mean(xs[eegs]) # Center the points
ys += 0.5 - np.mean(ys[eegs])
xs = xs * 2. - 1. # Values ranging from -1 to 1
ys = ys * 2. - 1.
rs = np.clip(np.sqrt(xs ** 2 + ys ** 2), 0., 1.)
alphas = np.arccos(rs)
zs = np.sin(alphas)
return np.column_stack([xs, ys, zs])
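# Illustrative sketch for _topo_to_sphere: the point at the centre of a
# square of EEG positions maps to the north pole (z = 1), while the four
# corners land on the equator (z ~= 0).
def _example_topo_to_sphere():
    """Sketch: map a square of 2D positions plus its centre to a sphere."""
    pos = [[0., 0.], [2., 0.], [0., 2.], [2., 2.], [1., 1.]]
    coords = _topo_to_sphere(pos, eegs=[0, 1, 2, 3])
    assert np.allclose(coords[4], [0., 0., 1.])        # centre -> north pole
    assert np.allclose(coords[:4, 2], 0., atol=1e-6)   # corners -> equator
    return coords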
def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads',
raise_error=True):
"""Find the picks for pairing grad channels.
Parameters
----------
info : instance of Info
An info dictionary containing channel information.
layout : Layout | None
The layout if available. Defaults to None.
topomap_coords : bool
Return the coordinates for a topomap plot along with the picks. If
False, only picks are returned. Defaults to True.
exclude : list of str | str
List of channels to exclude. If empty, do not exclude any.
If 'bads', exclude channels in info['bads']. Defaults to 'bads'.
raise_error : bool
Whether to raise an error when no pairs are found. If False, raises a
warning.
Returns
-------
picks : array of int
Picks for the grad channels, ordered in pairs.
coords : array, shape = (n_grad_channels, 3)
Coordinates for a topomap plot (optional, only returned if
topomap_coords == True).
"""
# find all complete pairs of grad channels
pairs = defaultdict(list)
grad_picks = pick_types(info, meg='grad', ref_meg=False, exclude=exclude)
_, has_vv_grad, *_, has_neuromag_122_grad, _ = _get_ch_info(info)
for i in grad_picks:
ch = info['chs'][i]
name = ch['ch_name']
if has_vv_grad and name.startswith('MEG'):
if name.endswith(('2', '3')):
key = name[-4:-1]
pairs[key].append(ch)
if has_neuromag_122_grad and name.startswith('MEG'):
key = (int(name[-3:]) - 1) // 2
pairs[key].append(ch)
pairs = [p for p in pairs.values() if len(p) == 2]
if len(pairs) == 0:
if raise_error:
raise ValueError("No 'grad' channel pairs found.")
else:
warn("No 'grad' channel pairs found.")
return list()
# find the picks corresponding to the grad channels
grad_chs = sum(pairs, [])
ch_names = info['ch_names']
picks = [ch_names.index(c['ch_name']) for c in grad_chs]
if topomap_coords:
shape = (len(pairs), 2, -1)
coords = (_find_topomap_coords(info, picks, layout)
.reshape(shape).mean(axis=1))
return picks, coords
else:
return picks
# This helper pairs grad channels when no Info object is available,
# as is the case for Projections, which do not carry measurement info.
def _pair_grad_sensors_ch_names_vectorview(ch_names):
"""Find the indices for pairing grad channels in a Vectorview system.
Parameters
----------
ch_names : list of str
A list of channel names.
Returns
-------
indexes : list of int
Indices of the grad channels, ordered in pairs.
"""
pairs = defaultdict(list)
for i, name in enumerate(ch_names):
if name.startswith('MEG'):
if name.endswith(('2', '3')):
key = name[-4:-1]
pairs[key].append(i)
pairs = [p for p in pairs.values() if len(p) == 2]
grad_chs = sum(pairs, [])
return grad_chs
# This helper pairs grad channels when no Info object is available,
# as is the case for Projections, which do not carry measurement info.
def _pair_grad_sensors_ch_names_neuromag122(ch_names):
"""Find the indices for pairing grad channels in a Neuromag 122 system.
Parameters
----------
ch_names : list of str
A list of channel names.
Returns
-------
indexes : list of int
Indices of the grad channels, ordered in pairs.
"""
pairs = defaultdict(list)
for i, name in enumerate(ch_names):
if name.startswith('MEG'):
key = (int(name[-3:]) - 1) // 2
pairs[key].append(i)
pairs = [p for p in pairs.values() if len(p) == 2]
grad_chs = sum(pairs, [])
return grad_chs
def _merge_ch_data(data, ch_type, names, method='rms'):
"""Merge data from channel pairs.
Parameters
----------
data : array, shape = (n_channels, ..., n_times)
Data for channels, ordered in pairs.
ch_type : str
Channel type.
names : list
List of channel names.
method : str
Can be 'rms' or 'mean'.
Returns
-------
data : array, shape = (n_channels / 2, ..., n_times)
The root mean square or mean for each pair.
names : list
List of channel names.
"""
if ch_type == 'grad':
data = _merge_grad_data(data, method)
else:
assert ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od')
data, names = _merge_nirs_data(data, names)
return data, names
def _merge_grad_data(data, method='rms'):
"""Merge data from channel pairs using the RMS or mean.
Parameters
----------
data : array, shape = (n_channels, ..., n_times)
Data for channels, ordered in pairs.
method : str
Can be 'rms' or 'mean'.
Returns
-------
data : array, shape = (n_channels / 2, ..., n_times)
The root mean square or mean for each pair.
"""
data, orig_shape = data.reshape((len(data) // 2, 2, -1)), data.shape
if method == 'mean':
data = np.mean(data, axis=1)
elif method == 'rms':
data = np.sqrt(np.sum(data ** 2, axis=1) / 2)
else:
raise ValueError('method must be "rms" or "mean", got %s.' % method)
return data.reshape(data.shape[:1] + orig_shape[1:])
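# Illustrative sketch for _merge_grad_data: a single gradiometer pair with
# amplitudes 3 and 4 merges to sqrt((3**2 + 4**2) / 2) = sqrt(12.5) under
# 'rms' and to 3.5 under 'mean'.
def _example_merge_grad_data():
    """Sketch: merge one gradiometer pair with RMS and with the mean."""
    data = np.array([[3.], [4.]])  # shape (n_channels=2, n_times=1)
    rms = _merge_grad_data(data, method='rms')
    mean = _merge_grad_data(data, method='mean')
    assert np.allclose(rms, np.sqrt(12.5)) and np.allclose(mean, 3.5)
    return rms, mean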
def _merge_nirs_data(data, merged_names):
"""Merge data from multiple nirs channel using the mean.
Channel names that have an x in them will be merged. The first channel in
the name is replaced with the mean of all listed channels. The other
channels are removed.
Parameters
----------
data : array, shape = (n_channels, ..., n_times)
Data for channels.
merged_names : list
List of strings containing the channel names. Channels that are to be
merged contain an x between them.
Returns
-------
data : array
Data for channels with requested channels merged. Channels used in the
merge are removed from the array.
"""
to_remove = np.empty(0, dtype=np.int32)
for idx, ch in enumerate(merged_names):
if 'x' in ch:
indices = np.empty(0, dtype=np.int32)
channels = ch.split("x")
for sub_ch in channels[1:]:
indices = np.append(indices, merged_names.index(sub_ch))
data[idx] = np.mean(data[np.append(idx, indices)], axis=0)
to_remove = np.append(to_remove, indices)
for rem in sorted(to_remove, reverse=True):
del merged_names[rem]
data = np.delete(data, rem, 0)
return data, merged_names
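# Illustrative sketch for _merge_nirs_data: the name 'ch1xch2' requests
# that 'ch2' be averaged into the first channel and then dropped, so two
# channels with values 1 and 3 collapse to one channel with value 2.
def _example_merge_nirs_data():
    """Sketch: average one pair of NIRS channels marked with an 'x'."""
    data = np.array([[1.], [3.]])
    names = ['ch1xch2', 'ch2']
    merged, kept = _merge_nirs_data(data, names)
    assert np.allclose(merged, [[2.]]) and kept == ['ch1xch2']
    return merged, kept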
def generate_2d_layout(xy, w=.07, h=.05, pad=.02, ch_names=None,
ch_indices=None, name='ecog', bg_image=None,
normalize=True):
"""Generate a custom 2D layout from xy points.
Generates a 2-D layout for plotting with plot_topo methods and
functions. XY points will be normalized between 0 and 1, where
normalization extremes will be either the min/max of xy, or
the width/height of bg_image.
Parameters
----------
xy : ndarray, shape (N, 2)
The xy coordinates of sensor locations.
w : float
The width of each sensor's axis (between 0 and 1).
h : float
The height of each sensor's axis (between 0 and 1).
pad : float
Portion of the box to reserve for padding. The value can range between
0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding).
ch_names : list
The names of each channel. Must be a list of strings, with one
string per channel.
ch_indices : list
Index of each channel - must be a collection of unique integers,
one index per channel.
name : str
The name of this layout type.
bg_image : str | ndarray
The image over which sensor axes will be plotted. Either a path to an
image file, or an array that can be plotted with plt.imshow. If
provided, xy points will be normalized by the width/height of this
image. If not, xy points will be normalized by their own min/max.
normalize : bool
Whether to normalize the coordinates to run from 0 to 1. Defaults to
True.
Returns
-------
layout : Layout
A Layout object that can be plotted with plot_topo
functions and methods.
See Also
--------
make_eeg_layout, make_grid_layout
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
if ch_indices is None:
ch_indices = np.arange(xy.shape[0])
if ch_names is None:
ch_names = ['{}'.format(i) for i in ch_indices]
if len(ch_names) != len(ch_indices):
raise ValueError('# channel names and indices must be equal')
if len(ch_names) != len(xy):
raise ValueError('# channel names and xy vals must be equal')
x, y = xy.copy().astype(float).T
# Normalize xy to 0-1
if bg_image is not None:
# Normalize by image dimensions
img = plt.imread(bg_image) if isinstance(bg_image, str) else bg_image
x /= img.shape[1]
y /= img.shape[0]
elif normalize:
# Normalize x and y by their maxes
for i_dim in [x, y]:
i_dim -= i_dim.min(0)
i_dim /= (i_dim.max(0) - i_dim.min(0))
# Create box and pos variable
box = _box_size(np.vstack([x, y]).T, padding=pad)
box = (0, 0, box[0], box[1])
w, h = [np.array([i] * x.shape[0]) for i in [w, h]]
loc_params = np.vstack([x, y, w, h]).T
layout = Layout(box, loc_params, ch_names, ch_indices, name)
return layout
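# Illustrative sketch: build a layout from a synthetic 4 x 4 electrode
# grid, as one might do for ECoG data where only raw xy positions are
# known. The coordinates and channel names below are made up.
def _example_generate_2d_layout():
    """Sketch: generate a layout for a synthetic 4 x 4 grid."""
    xx, yy = np.meshgrid(np.arange(4.), np.arange(4.))
    xy = np.c_[xx.ravel(), yy.ravel()]
    names = ['E%02d' % k for k in range(len(xy))]
    return generate_2d_layout(xy, ch_names=names, name='ecog_grid',
                              normalize=True)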
|
bsd-3-clause
|
pypot/scikit-learn
|
examples/ensemble/plot_adaboost_regression.py
|
311
|
1529
|
"""
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased, the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
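# As an illustration of the effect of boosting, staged_predict yields the
# prediction after each boosting iteration; the training MSE shrinks as
# boosts accumulate.
from sklearn.metrics import mean_squared_error
staged_mse = [mean_squared_error(y, y_staged)
              for y_staged in regr_2.staged_predict(X)]
print("Train MSE after 1 boost:    %.4f" % staged_mse[0])
print("Train MSE after %d boosts: %.4f" % (len(staged_mse), staged_mse[-1]))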
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
festeh/BuildingMachineLearningSystemsWithPython
|
ch05/log_reg_example.py
|
24
|
3203
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from data import CHART_DIR
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot
np.random.seed(3)
num_per_class = 40
X = np.hstack((norm.rvs(2, size=num_per_class, scale=2),
norm.rvs(8, size=num_per_class, scale=3)))
y = np.hstack((np.zeros(num_per_class),
np.ones(num_per_class)))
def lr_model(clf, X):
return 1.0 / (1.0 + np.exp(-(clf.intercept_ + clf.coef_ * X)))
from sklearn.linear_model import LogisticRegression
logclf = LogisticRegression()
print(logclf)
logclf.fit(X.reshape(num_per_class * 2, 1), y)
print(np.exp(logclf.intercept_), np.exp(logclf.coef_.ravel()))
print("P(x=-1)=%.2f\tP(x=7)=%.2f" %
(lr_model(logclf, -1), lr_model(logclf, 7)))
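# As a quick illustration, lr_model is simply the logistic sigmoid of the
# decision function, so it agrees with predict_proba for class 1.
assert np.allclose(lr_model(logclf, 7), logclf.predict_proba([[7]])[0, 1])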
X_test = np.arange(-5, 20, 0.1)
pyplot.figure(figsize=(10, 4))
pyplot.xlim((-5, 20))
pyplot.scatter(X, y, c=y)
pyplot.xlabel("feature value")
pyplot.ylabel("class")
pyplot.grid(True, linestyle='-', color='0.75')
pyplot.savefig(
os.path.join(CHART_DIR, "log_reg_example_data.png"), bbox_inches="tight")
def lin_model(clf, X):
return clf.intercept_ + clf.coef_ * X
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
print(clf)
clf.fit(X.reshape(num_per_class * 2, 1), y)
X_odds = np.arange(0, 1, 0.001)
pyplot.figure(figsize=(10, 4))
pyplot.subplot(1, 2, 1)
pyplot.scatter(X, y, c=y)
pyplot.plot(X_test, lin_model(clf, X_test))
pyplot.xlabel("feature value")
pyplot.ylabel("class")
pyplot.title("linear fit on original data")
pyplot.grid(True, linestyle='-', color='0.75')
X_ext = np.hstack((X, norm.rvs(20, size=100, scale=5)))
y_ext = np.hstack((y, np.ones(100)))
clf = LinearRegression()
clf.fit(X_ext.reshape(num_per_class * 2 + 100, 1), y_ext)
pyplot.subplot(1, 2, 2)
pyplot.scatter(X_ext, y_ext, c=y_ext)
pyplot.plot(X_ext, lin_model(clf, X_ext))
pyplot.xlabel("feature value")
pyplot.ylabel("class")
pyplot.title("linear fit on additional data")
pyplot.grid(True, linestyle='-', color='0.75')
pyplot.savefig(
os.path.join(CHART_DIR, "log_reg_log_linear_fit.png"), bbox_inches="tight")
pyplot.figure(figsize=(10, 4))
pyplot.xlim((-5, 20))
pyplot.scatter(X, y, c=y)
pyplot.plot(X_test, lr_model(logclf, X_test).ravel())
pyplot.plot(X_test, np.ones(X_test.shape[0]) * 0.5, "--")
pyplot.xlabel("feature value")
pyplot.ylabel("class")
pyplot.grid(True, linestyle='-', color='0.75')
pyplot.savefig(
os.path.join(CHART_DIR, "log_reg_example_fitted.png"), bbox_inches="tight")
X = np.arange(0, 1, 0.001)
pyplot.figure(figsize=(10, 4))
pyplot.subplot(1, 2, 1)
pyplot.xlim((0, 1))
pyplot.ylim((0, 10))
pyplot.plot(X, X / (1 - X))
pyplot.xlabel("P")
pyplot.ylabel("odds = P / (1-P)")
pyplot.grid(True, linestyle='-', color='0.75')
pyplot.subplot(1, 2, 2)
pyplot.xlim((0, 1))
pyplot.plot(X, np.log(X / (1 - X)))
pyplot.xlabel("P")
pyplot.ylabel("log(odds) = log(P / (1-P))")
pyplot.grid(True, linestyle='-', color='0.75')
pyplot.savefig(
os.path.join(CHART_DIR, "log_reg_log_odds.png"), bbox_inches="tight")
|
mit
|
MartinDelzant/scikit-learn
|
sklearn/ensemble/__init__.py
|
217
|
1307
|
"""
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
|
bsd-3-clause
|
tylerjereddy/scipy
|
scipy/spatial/_spherical_voronoi.py
|
7
|
13692
|
"""
Spherical Voronoi Code
.. versionadded:: 0.18.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as SciPy.
#
import warnings
import numpy as np
import scipy
from . import _voronoi
from scipy.spatial import cKDTree
__all__ = ['SphericalVoronoi']
def calculate_solid_angles(R):
"""Calculates the solid angles of plane triangles. Implements the method of
Van Oosterom and Strackee [VanOosterom]_ with some modifications. Assumes
that input points have unit norm."""
# Original method uses a triple product `R1 . (R2 x R3)` for the numerator.
# This is equal to the determinant of the matrix [R1 R2 R3], which can be
# computed with better stability.
numerator = np.linalg.det(R)
denominator = 1 + (np.einsum('ij,ij->i', R[:, 0], R[:, 1]) +
np.einsum('ij,ij->i', R[:, 1], R[:, 2]) +
np.einsum('ij,ij->i', R[:, 2], R[:, 0]))
return np.abs(2 * np.arctan2(numerator, denominator))
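# Illustrative sanity check: the spherical triangle spanned by the x, y and
# z unit vectors covers one octant of the unit sphere, i.e. a solid angle
# of 4 * pi / 8 = pi / 2.
def _example_solid_angle_octant():
    """Sketch: the solid angle of one octant is pi / 2."""
    octant = np.eye(3)[np.newaxis]  # shape (1, 3, 3): one triangle
    angle = calculate_solid_angles(octant)
    assert np.allclose(angle, np.pi / 2)
    return angle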
class SphericalVoronoi:
""" Voronoi diagrams on the surface of a sphere.
.. versionadded:: 0.18.0
Parameters
----------
points : ndarray of floats, shape (npoints, ndim)
Coordinates of points from which to construct a spherical
Voronoi diagram.
radius : float, optional
Radius of the sphere (Default: 1)
center : ndarray of floats, shape (ndim,)
Center of sphere (Default: origin)
threshold : float
Threshold for detecting duplicate points and
mismatches between points and sphere parameters.
(Default: 1e-06)
Attributes
----------
points : double array of shape (npoints, ndim)
the points in `ndim` dimensions to generate the Voronoi diagram from
radius : double
radius of the sphere
center : double array of shape (ndim,)
center of the sphere
vertices : double array of shape (nvertices, ndim)
Voronoi vertices corresponding to points
regions : list of list of integers of shape (npoints, _ )
the n-th entry is a list consisting of the indices
of the vertices belonging to the n-th point in points
Methods
-------
calculate_areas
Calculates the areas of the Voronoi regions. For 2D point sets, the
regions are circular arcs. The sum of the areas is `2 * pi * radius`.
For 3D point sets, the regions are spherical polygons. The sum of the
areas is `4 * pi * radius**2`.
Raises
------
ValueError
If there are duplicates in `points`.
If the provided `radius` is not consistent with `points`.
Notes
-----
The spherical Voronoi diagram algorithm proceeds as follows. The Convex
Hull of the input points (generators) is calculated, and is equivalent to
their Delaunay triangulation on the surface of the sphere [Caroli]_.
The Convex Hull neighbour information is then used to
order the Voronoi region vertices around each generator. The latter
approach is substantially less sensitive to floating point issues than
angle-based methods of Voronoi region vertex sorting.
Empirical assessment of spherical Voronoi algorithm performance suggests
quadratic time complexity (loglinear is optimal, but algorithms are more
challenging to implement).
References
----------
.. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
points on or close to a sphere. Research Report RR-7004, 2009.
.. [VanOosterom] Van Oosterom and Strackee. The solid angle of a plane
triangle. IEEE Transactions on Biomedical Engineering,
2, 1983, pp 125--126.
See Also
--------
Voronoi : Conventional Voronoi diagrams in N dimensions.
Examples
--------
Do some imports and take some points on a cube:
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import SphericalVoronoi, geometric_slerp
>>> from mpl_toolkits.mplot3d import proj3d
>>> # set input data
>>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
Calculate the spherical Voronoi diagram:
>>> radius = 1
>>> center = np.array([0, 0, 0])
>>> sv = SphericalVoronoi(points, radius, center)
Generate plot:
>>> # sort vertices (optional, helpful for plotting)
>>> sv.sort_vertices_of_regions()
>>> t_vals = np.linspace(0, 1, 2000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> # plot the unit sphere for reference (optional)
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
>>> # plot generator points
>>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
>>> # plot Voronoi vertices
>>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
... c='g')
>>> # indicate Voronoi regions (as Euclidean polygons)
>>> for region in sv.regions:
... n = len(region)
... for i in range(n):
... start = sv.vertices[region][i]
... end = sv.vertices[region][(i + 1) % n]
... result = geometric_slerp(start, end, t_vals)
... ax.plot(result[..., 0],
... result[..., 1],
... result[..., 2],
... c='k')
>>> ax.azim = 10
>>> ax.elev = 40
>>> _ = ax.set_xticks([])
>>> _ = ax.set_yticks([])
>>> _ = ax.set_zticks([])
>>> fig.set_size_inches(4, 4)
>>> plt.show()
"""
def __init__(self, points, radius=1, center=None, threshold=1e-06):
if radius is None:
radius = 1.
warnings.warn('`radius` is `None`. '
'This will raise an error in a future version. '
'Please provide a floating point number '
'(i.e. `radius=1`).',
DeprecationWarning)
self.radius = float(radius)
self.points = np.array(points).astype(np.double)
self._dim = len(points[0])
if center is None:
self.center = np.zeros(self._dim)
else:
self.center = np.array(center, dtype=float)
# test degenerate input
self._rank = np.linalg.matrix_rank(self.points - self.points[0],
tol=threshold * self.radius)
if self._rank < self._dim:
raise ValueError("Rank of input points must be at least {0}".format(self._dim))
if cKDTree(self.points).query_pairs(threshold * self.radius):
raise ValueError("Duplicate generators present.")
radii = np.linalg.norm(self.points - self.center, axis=1)
max_discrepancy = np.abs(radii - self.radius).max()
if max_discrepancy >= threshold * self.radius:
raise ValueError("Radius inconsistent with generators.")
self._calc_vertices_regions()
def _calc_vertices_regions(self):
"""
Calculates the Voronoi vertices and regions of the generators stored
in self.points. The vertices will be stored in self.vertices and the
regions in self.regions.
This algorithm was discussed at PyData London 2015 by
Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
"""
# get Convex Hull
conv = scipy.spatial.ConvexHull(self.points)
# get circumcenters of Convex Hull triangles from facet equations
# for 3D input circumcenters will have shape: (2N-4, 3)
self.vertices = self.radius * conv.equations[:, :-1] + self.center
self._simplices = conv.simplices
# calculate regions from triangulation
# for 3D input simplex_indices will have shape: (2N-4,)
simplex_indices = np.arange(len(self._simplices))
# for 3D input tri_indices will have shape: (6N-12,)
tri_indices = np.column_stack([simplex_indices] * self._dim).ravel()
# for 3D input point_indices will have shape: (6N-12,)
point_indices = self._simplices.ravel()
# for 3D input indices will have shape: (6N-12,)
indices = np.argsort(point_indices, kind='mergesort')
# for 3D input flattened_groups will have shape: (6N-12,)
flattened_groups = tri_indices[indices].astype(np.intp)
# intervals will have shape: (N+1,)
intervals = np.cumsum(np.bincount(point_indices + 1))
# split flattened groups to get nested list of unsorted regions
groups = [list(flattened_groups[intervals[i]:intervals[i + 1]])
for i in range(len(intervals) - 1)]
self.regions = groups
def sort_vertices_of_regions(self):
"""Sort indices of the vertices to be (counter-)clockwise ordered.
Raises
------
TypeError
If the points are not three-dimensional.
Notes
-----
For each region in regions, it sorts the indices of the Voronoi
vertices such that the resulting points are in a clockwise or
counterclockwise order around the generator point.
This is done as follows: Recall that the n-th region in regions
surrounds the n-th generator in points and that the k-th
Voronoi vertex in vertices is the circumcenter of the k-th triangle
in self._simplices. For each region n, we choose the first triangle
(=Voronoi vertex) in self._simplices and a vertex of that triangle
not equal to the center n. These determine a unique neighbor of that
triangle, which is then chosen as the second triangle. The second
triangle will have a unique vertex not equal to the current vertex or
the center. This determines a unique neighbor of the second triangle,
which is then chosen as the third triangle and so forth. We proceed
through all the triangles (=Voronoi vertices) belonging to the
generator in points and obtain a sorted version of the vertices
of its surrounding region.
"""
if self._dim != 3:
raise TypeError("Only supported for three-dimensional point sets")
_voronoi.sort_vertices_of_regions(self._simplices, self.regions)
def _calculate_areas_3d(self):
self.sort_vertices_of_regions()
sizes = [len(region) for region in self.regions]
csizes = np.cumsum(sizes)
num_regions = csizes[-1]
# We create a set of triangles consisting of one point and two Voronoi
# vertices. The vertices of each triangle are adjacent in the sorted
# regions list.
point_indices = [i for i, size in enumerate(sizes)
for j in range(size)]
nbrs1 = np.array([r for region in self.regions for r in region])
# The calculation of nbrs2 is a vectorized version of:
# np.array([r for region in self.regions for r in np.roll(region, 1)])
nbrs2 = np.roll(nbrs1, 1)
indices = np.roll(csizes, 1)
indices[0] = 0
nbrs2[indices] = nbrs1[csizes - 1]
# Normalize points and vertices.
pnormalized = (self.points - self.center) / self.radius
vnormalized = (self.vertices - self.center) / self.radius
# Create the complete set of triangles and calculate their solid angles
triangles = np.hstack([pnormalized[point_indices],
vnormalized[nbrs1],
vnormalized[nbrs2]
]).reshape((num_regions, 3, 3))
triangle_solid_angles = calculate_solid_angles(triangles)
# Sum the solid angles of the triangles in each region
solid_angles = np.cumsum(triangle_solid_angles)[csizes - 1]
solid_angles[1:] -= solid_angles[:-1]
# Get polygon areas using A = omega * r**2
return solid_angles * self.radius**2
def _calculate_areas_2d(self):
# Find start and end points of arcs
arcs = self.points[self._simplices] - self.center
# Calculate the angle subtended by arcs
cosine = np.einsum('ij,ij->i', arcs[:, 0], arcs[:, 1])
sine = np.abs(np.linalg.det(arcs))
theta = np.arctan2(sine, cosine)
# Get areas using A = r * theta
areas = self.radius * theta
# Correct arcs which go the wrong way (single-hemisphere inputs)
signs = np.sign(np.einsum('ij,ij->i', arcs[:, 0],
self.vertices - self.center))
indices = np.where(signs < 0)
areas[indices] = 2 * np.pi * self.radius - areas[indices]
return areas
def calculate_areas(self):
"""Calculates the areas of the Voronoi regions.
For 2D point sets, the regions are circular arcs. The sum of the areas
is `2 * pi * radius`.
For 3D point sets, the regions are spherical polygons. The sum of the
areas is `4 * pi * radius**2`.
.. versionadded:: 1.5.0
Returns
-------
areas : double array of shape (npoints,)
The areas of the Voronoi regions.
"""
if self._dim == 2:
return self._calculate_areas_2d()
elif self._dim == 3:
return self._calculate_areas_3d()
else:
raise TypeError("Only supported for 2D and 3D point sets")
|
bsd-3-clause
|