repo_name (stringlengths 6-92) | path (stringlengths 4-191) | copies (stringclasses, 322 values) | size (stringlengths 4-6) | content (stringlengths 821-753k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
birdsarah/bokeh
|
bokeh/server/tests/config/test_blaze_config.py
|
29
|
1202
|
from __future__ import absolute_import
import numpy as np
import pandas as pd
qty=10000
gauss = {'oneA': np.random.randn(qty),
'oneB': np.random.randn(qty),
'cats': np.random.randint(0,5,size=qty),
'hundredA': np.random.randn(qty)*100,
'hundredB': np.random.randn(qty)*100}
gauss = pd.DataFrame(gauss)
uniform = {'oneA': np.random.rand(qty),
'oneB': np.random.rand(qty),
'hundredA': np.random.rand(qty)*100,
'hundredB': np.random.rand(qty)*100}
uniform = pd.DataFrame(uniform)
bivariate = {'A1': np.hstack([np.random.randn(qty//2), np.random.randn(qty//2)+1]),
             'A2': np.hstack([np.random.randn(qty//2), np.random.randn(qty//2)+2]),
             'A3': np.hstack([np.random.randn(qty//2), np.random.randn(qty//2)+3]),
             'A4': np.hstack([np.random.randn(qty//2), np.random.randn(qty//2)+4]),
             'A5': np.hstack([np.random.randn(qty//2), np.random.randn(qty//2)+5]),
             'B': np.random.randn(qty),
             'C': np.hstack([np.zeros(qty//2), np.ones(qty//2)])}
bivariate = pd.DataFrame(bivariate)
data_dict = dict(uniform=uniform,
gauss=gauss,
bivariate=bivariate)
|
bsd-3-clause
|
mayankjohri/LetsExplorePython
|
Section 3 - Machine Learning/libs/core_libs/matplotlib/fill_between_demo.py
|
2
|
2107
|
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0.0, 2, 0.01)
y1 = np.sin(2*np.pi*x)
y2 = 1.2*np.sin(4*np.pi*x)
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)
ax1.fill_between(x, 0, y1)
ax1.set_ylabel('between y1 and 0')
ax2.fill_between(x, y1, 1)
ax2.set_ylabel('between y1 and 1')
ax3.fill_between(x, y1, y2)
ax3.set_ylabel('between y1 and y2')
ax3.set_xlabel('x')
# now fill between y1 and y2 where a logical condition is met. Note
# this is different than calling
# fill_between(x[where], y1[where], y2[where])
# because of edge effects over multiple contiguous regions.
fig, (ax, ax1) = plt.subplots(2, 1, sharex=True)
ax.plot(x, y1, x, y2, color='black')
ax.fill_between(x, y1, y2, where=y2 >= y1, facecolor='green', interpolate=True)
ax.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red', interpolate=True)
ax.set_title('fill between where')
# Test support for masked arrays.
y2 = np.ma.masked_greater(y2, 1.0)
ax1.plot(x, y1, x, y2, color='black')
ax1.fill_between(x, y1, y2, where=y2 >= y1, facecolor='green', interpolate=True)
ax1.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red', interpolate=True)
ax1.set_title('Now regions with y2>1 are masked')
# This example illustrates a problem; because of the data
# gridding, there are undesired unfilled triangles at the crossover
# points. A brute-force solution would be to interpolate all
# arrays to a very fine grid before plotting.
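# A minimal sketch of that brute-force fix (not part of the original demo):
# evaluating the curves on a much finer grid before calling fill_between
# shrinks the unfilled triangles at the crossover points.
fig_fine, ax_fine = plt.subplots()
x_fine = np.arange(0.0, 2, 0.0005)
y1_fine = np.sin(2*np.pi*x_fine)
y2_fine = 1.2*np.sin(4*np.pi*x_fine)
ax_fine.plot(x_fine, y1_fine, x_fine, y2_fine, color='black')
ax_fine.fill_between(x_fine, y1_fine, y2_fine, where=y2_fine >= y1_fine,
                     facecolor='green', interpolate=True)
ax_fine.fill_between(x_fine, y1_fine, y2_fine, where=y2_fine <= y1_fine,
                     facecolor='red', interpolate=True)
ax_fine.set_title('fill between on a fine grid (brute-force fix)')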
# show how to use transforms to create axes spans where a certain condition is satisfied
fig, ax = plt.subplots()
y = np.sin(4*np.pi*x)
ax.plot(x, y, color='black')
# use the data coordinates for the x-axis and the axes coordinates for the y-axis
import matplotlib.transforms as mtransforms
trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
theta = 0.9
ax.axhline(theta, color='green', lw=2, alpha=0.5)
ax.axhline(-theta, color='red', lw=2, alpha=0.5)
ax.fill_between(x, 0, 1, where=y > theta, facecolor='green', alpha=0.5, transform=trans)
ax.fill_between(x, 0, 1, where=y < -theta, facecolor='red', alpha=0.5, transform=trans)
plt.show()
|
gpl-3.0
|
berkeley-stat159/project-alpha
|
code/utils/scripts/smooth_script.py
|
1
|
1613
|
""" Script for smooth function.
Run with:
python smooth_script.py
in the scripts directory
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
import os
import sys
# Relative path to subject 1 data
pathtodata = "../../../data/ds009/sub001/"
condition_location=pathtodata+"model/model001/onsets/task001_run001/"
location_of_images="../../../images/"
#sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
sys.path.append("../functions")
# Load Functions
from stimuli import events2neural
from event_related_fMRI_functions import hrf_single, convolution_specialized
from glm import glm, glm_diagnostics, glm_multiple
from smooth import smoothvoxels
from Image_Visualizing import present_3d
# Load the image data for subject 1.
img = nib.load(pathtodata+"BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
data = data[...,6:] # Knock off the first 6 observations.
#######################
# a. (my) smoothing #
#######################
# An arbitrarily chosen time point.
time = 7
original_slice = data[..., time]
# full width at half maximum (FWHM)
fwhm = 1.5
smoothed_slice = smoothvoxels(data, fwhm, time)
# visually compare original_slice to smoothed_slice
plt.imshow(present_3d(smoothed_slice))
plt.colorbar()
plt.title('Smoothed Slice')
plt.clim(0,1600)
plt.savefig(location_of_images+"smoothed_slice.png")
plt.close()
plt.imshow(present_3d(original_slice))
plt.colorbar()
plt.title('Original Slice')
plt.clim(0,1600)
plt.savefig(location_of_images+"original_slice.png")
plt.close()
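# The smoothvoxels helper lives in ../functions/smooth.py and is not shown in
# this script. A minimal sketch of the usual approach (an assumption, not
# necessarily the project's exact implementation): convert the FWHM to a
# Gaussian standard deviation and filter the 3D volume at one time point.
from scipy.ndimage import gaussian_filter

def smooth_one_volume(data, fwhm, t):
    """Gaussian-smooth the 3D volume at time index t (sketch)."""
    sigma = fwhm / np.sqrt(8 * np.log(2))  # FWHM = sigma * 2*sqrt(2*ln 2)
    return gaussian_filter(data[..., t], sigma)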
|
bsd-3-clause
|
zyc9012/sdrl
|
sdrl/Gui/Utils.py
|
1
|
5618
|
#-*- coding: utf-8 -*-
from sdrl.Gui.Agents.CommonAgentDialog import CommonAgentDialog
from sdrl.Gui.Representations.RBF import RBFDialog
from sdrl.Gui.Representations.Tabular import TabularDialog
from sdrl.Gui.Representations.KernelizediFDD import KernelizediFDDDialog
from sdrl.Gui.Representations.iFDD import iFDDDialog
from sdrl.Gui.Representations.TileCoding import TileCodingDialog
from sdrl.Gui.Policies.eGreedy import eGreedyDialog
from sdrl.Gui.Policies.SwimmerPolicy import SwimmerPolicyDialog
from rlpy.Representations import *
from rlpy.Agents import *
from rlpy.Policies import *
from rlpy.Experiments import *
import matplotlib
'''
All configuration dialogs are registered here.
Entries with no configurable parameters are set to None.
'''
DialogMapping = {'QLearning':CommonAgentDialog,
'Sarsa':CommonAgentDialog,
'Greedy_GQ':CommonAgentDialog,
'RBF':RBFDialog,
'Tabular':TabularDialog,
'KernelizediFDD':KernelizediFDDDialog,
'IncrementalTabular':TabularDialog,
'IndependentDiscretization':TabularDialog,
'TileCoding':TileCodingDialog,
'iFDD':iFDDDialog,
'eGreedy':eGreedyDialog,
'SwimmerPolicy':SwimmerPolicyDialog,
'UniformRandom':None,
'Gibbs':None,
}
class RepresentationFactory(object):
@staticmethod
def get(config, name, domain):
if name in config:
config = config[name]
if name == 'Tabular':
return Tabular(domain, discretization=config['discretization'])
elif name == 'IncrementalTabular':
return IncrementalTabular(domain, discretization=config['discretization'])
elif name == 'IndependentDiscretization':
return IndependentDiscretization(domain, discretization=config['discretization'])
elif name == 'RBF':
return RBF(domain, num_rbfs=config['num_rbfs'],
resolution_max=config['resolution_max'], resolution_min=config['resolution_min'],
const_feature=False, normalize=True, seed=1)
elif name == 'KernelizediFDD':
return KernelizediFDD(domain,sparsify=config['sparsify'],
kernel=config['kernel'],kernel_args=config['kernel_args'],
active_threshold=config['active_threshold'],
discover_threshold=config['discover_threshold'],
max_active_base_feat=config['max_active_base_feat'],
max_base_feat_sim=config['max_base_feat_sim'])
elif name == 'iFDD':
initial_rep = IndependentDiscretization(domain)
return iFDD(domain, discovery_threshold=config['discover_threshold'],initial_representation=initial_rep,discretization=config['discretization'],iFDDPlus=1 - 1e-7)
elif name == 'TileCoding':
return TileCoding(domain, memory = config['memory'], num_tilings=config['num_tilings'])
class PolicyFactory(object):
@staticmethod
def get(config, name, representation):
if name in config:
config = config[name]
if name == 'eGreedy':
return eGreedy(representation, epsilon=config['epsilon'])
elif name == 'UniformRandom':
return UniformRandom(representation)
elif name == 'Gibbs':
return GibbsPolicy(representation)
elif name == 'SwimmerPolicy':
return SwimmerPolicy.SwimmerPolicy(representation, epsilon=config['epsilon'])
class AgentFactory(object):
@staticmethod
def _commonAgentGet(config, name, representation, policy, agentClass):
        '''Shared get() for the common agent types.'''
return agentClass(representation=representation, policy=policy,
discount_factor=config['gamma'],
initial_learn_rate=config['alpha'],
learn_rate_decay_mode=config['alpha_decay_mode'], boyan_N0=config['boyan_N0'],
lambda_=config['lambda'])
@staticmethod
def get(config, name, representation, policy):
if name in config:
config = config[name]
if name == 'QLearning':
return AgentFactory._commonAgentGet(config, name, representation, policy, Q_Learning)
elif name == 'Sarsa':
return AgentFactory._commonAgentGet(config, name, representation, policy, SARSA)
elif name == 'Greedy_GQ':
return AgentFactory._commonAgentGet(config, name, representation, policy, Greedy_GQ)
class ExperimentFactory(object):
@staticmethod
def get(**opt):
if matplotlib.get_backend().lower() == 'qt4agg':
from PyQt4 import QtGui
            '''
            When matplotlib uses the qt4agg backend, interactive mode hangs,
            so Experiment is hacked here to pump the Qt event loop manually.
            '''
            class QtPlottingExperiment(Experiment):
                '''
                This method is hooked because it is called once per step,
                which lets the Qt event loop be processed on every plotting
                cycle; the hook is unrelated to the method's own purpose.
                '''
def _gather_transition_statistics(self, s, a, sn, r, learning=False):
super(QtPlottingExperiment, self)._gather_transition_statistics(s, a, sn, r, learning)
QtGui.qApp.processEvents()
return QtPlottingExperiment(**opt)
else:
return Experiment(**opt)
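# Example wiring of the three factories above (a sketch; the config dict is an
# assumption -- each factory only requires a sub-dict, keyed by the component
# name, holding the parameters it looks up):
#
#     config = {'Tabular': {'discretization': 20},
#               'eGreedy': {'epsilon': 0.1},
#               'QLearning': {'gamma': 0.9, 'alpha': 0.1,
#                             'alpha_decay_mode': 'boyan', 'boyan_N0': 100,
#                             'lambda': 0.0}}
#     representation = RepresentationFactory.get(config, 'Tabular', domain)
#     policy = PolicyFactory.get(config, 'eGreedy', representation)
#     agent = AgentFactory.get(config, 'QLearning', representation, policy)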
|
gpl-2.0
|
trenton3983/Data_Science_from_Scratch
|
code-python3/introduction.py
|
6
|
8194
|
# at this stage in the book we haven't actually installed matplotlib,
# comment this out if you need to
from matplotlib import pyplot as plt
##########################
# #
# FINDING KEY CONNECTORS #
# #
##########################
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" },
{ "id": 10, "name": "Jen" }
]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# first give each user an empty list
for user in users:
user["friends"] = []
# and then populate the lists with friendships
for i, j in friendships:
    # this works because users[i] is the user whose id is i
    users[i]["friends"].append(users[j]) # add j as a friend of i
    users[j]["friends"].append(users[i]) # add i as a friend of j
def number_of_friends(user):
"""how many friends does _user_ have?"""
return len(user["friends"]) # length of friend_ids list
total_connections = sum(number_of_friends(user)
for user in users) # 24
num_users = len(users)
avg_connections = total_connections / num_users # 2.4
################################
# #
# DATA SCIENTISTS YOU MAY KNOW #
# #
################################
def friends_of_friend_ids_bad(user):
# "foaf" is short for "friend of a friend"
return [foaf["id"]
for friend in user["friends"] # for each of user's friends
for foaf in friend["friends"]] # get each of _their_ friends
from collections import Counter # not loaded by default
def not_the_same(user, other_user):
"""two users are not the same if they have different ids"""
return user["id"] != other_user["id"]
def not_friends(user, other_user):
"""other_user is not a friend if he's not in user["friends"];
that is, if he's not_the_same as all the people in user["friends"]"""
return all(not_the_same(friend, other_user)
for friend in user["friends"])
def friends_of_friend_ids(user):
return Counter(foaf["id"]
for friend in user["friends"] # for each of my friends
for foaf in friend["friends"] # count *their* friends
if not_the_same(user, foaf) # who aren't me
and not_friends(user, foaf)) # and aren't my friends
print(friends_of_friend_ids(users[3])) # Counter({0: 2, 5: 1})
interests = [
(0, "Hadoop"), (0, "Big Data"), (0, "HBase"), (0, "Java"),
(0, "Spark"), (0, "Storm"), (0, "Cassandra"),
(1, "NoSQL"), (1, "MongoDB"), (1, "Cassandra"), (1, "HBase"),
(1, "Postgres"), (2, "Python"), (2, "scikit-learn"), (2, "scipy"),
(2, "numpy"), (2, "statsmodels"), (2, "pandas"), (3, "R"), (3, "Python"),
(3, "statistics"), (3, "regression"), (3, "probability"),
(4, "machine learning"), (4, "regression"), (4, "decision trees"),
(4, "libsvm"), (5, "Python"), (5, "R"), (5, "Java"), (5, "C++"),
(5, "Haskell"), (5, "programming languages"), (6, "statistics"),
(6, "probability"), (6, "mathematics"), (6, "theory"),
(7, "machine learning"), (7, "scikit-learn"), (7, "Mahout"),
(7, "neural networks"), (8, "neural networks"), (8, "deep learning"),
(8, "Big Data"), (8, "artificial intelligence"), (9, "Hadoop"),
(9, "Java"), (9, "MapReduce"), (9, "Big Data")
]
def data_scientists_who_like(target_interest):
return [user_id
for user_id, user_interest in interests
if user_interest == target_interest]
from collections import defaultdict
# keys are interests, values are lists of user_ids with that interest
user_ids_by_interest = defaultdict(list)
for user_id, interest in interests:
user_ids_by_interest[interest].append(user_id)
# keys are user_ids, values are lists of interests for that user_id
interests_by_user_id = defaultdict(list)
for user_id, interest in interests:
interests_by_user_id[user_id].append(interest)
def most_common_interests_with(user_id):
    return Counter(interested_user_id
                   for interest in interests_by_user_id[user_id]
                   for interested_user_id in user_ids_by_interest[interest]
                   if interested_user_id != user_id)
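# For example, with the data above most_common_interests_with(3) counts
# {5: 2, 6: 2, 2: 1, 4: 1} -- users 5 and 6 each share two interests with
# user 3.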
###########################
# #
# SALARIES AND EXPERIENCE #
# #
###########################
salaries_and_tenures = [(83000, 8.7), (88000, 8.1),
(48000, 0.7), (76000, 6),
(69000, 6.5), (76000, 7.5),
(60000, 2.5), (83000, 10),
(48000, 1.9), (63000, 4.2)]
def make_chart_salaries_by_tenure():
tenures = [tenure for salary, tenure in salaries_and_tenures]
salaries = [salary for salary, tenure in salaries_and_tenures]
plt.scatter(tenures, salaries)
plt.xlabel("Years Experience")
plt.ylabel("Salary")
plt.show()
# keys are years
# values are the salaries for each tenure
salary_by_tenure = defaultdict(list)
for salary, tenure in salaries_and_tenures:
salary_by_tenure[tenure].append(salary)
average_salary_by_tenure = {
tenure : sum(salaries) / len(salaries)
for tenure, salaries in salary_by_tenure.items()
}
def tenure_bucket(tenure):
if tenure < 2: return "less than two"
elif tenure < 5: return "between two and five"
else: return "more than five"
salary_by_tenure_bucket = defaultdict(list)
for salary, tenure in salaries_and_tenures:
bucket = tenure_bucket(tenure)
salary_by_tenure_bucket[bucket].append(salary)
average_salary_by_bucket = {
tenure_bucket : sum(salaries) / len(salaries)
for tenure_bucket, salaries in salary_by_tenure_bucket.items()
}
#################
# #
# PAID_ACCOUNTS #
# #
#################
def predict_paid_or_unpaid(years_experience):
if years_experience < 3.0: return "paid"
elif years_experience < 8.5: return "unpaid"
else: return "paid"
######################
# #
# TOPICS OF INTEREST #
# #
######################
words_and_counts = Counter(word
for user, interest in interests
for word in interest.lower().split())
if __name__ == "__main__":
print()
print("######################")
print("#")
print("# FINDING KEY CONNECTORS")
print("#")
print("######################")
print()
print("total connections", total_connections)
print("number of users", num_users)
print("average connections", total_connections / num_users)
print()
# create a list (user_id, number_of_friends)
num_friends_by_id = [(user["id"], number_of_friends(user))
for user in users]
print("users sorted by number of friends:")
print(sorted(num_friends_by_id,
key=lambda pair: pair[1], # by number of friends
reverse=True)) # largest to smallest
print()
print("######################")
print("#")
print("# DATA SCIENTISTS YOU MAY KNOW")
print("#")
print("######################")
print()
print("friends of friends bad for user 0:", friends_of_friend_ids_bad(users[0]))
print("friends of friends for user 3:", friends_of_friend_ids(users[3]))
print()
print("######################")
print("#")
print("# SALARIES AND TENURES")
print("#")
print("######################")
print()
print("average salary by tenure", average_salary_by_tenure)
print("average salary by tenure bucket", average_salary_by_bucket)
print()
print("######################")
print("#")
print("# MOST COMMON WORDS")
print("#")
print("######################")
print()
for word, count in words_and_counts.most_common():
if count > 1:
print(word, count)
|
unlicense
|
jairideout/scikit-bio
|
skbio/diversity/alpha/tests/test_base.py
|
5
|
18232
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
from io import StringIO
import os
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio import TreeNode
from skbio.util import get_data_path
from skbio.tree import DuplicateNodeError, MissingNodeError
from skbio.diversity.alpha import (
berger_parker_d, brillouin_d, dominance, doubles, enspie,
esty_ci, faith_pd, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q,
margalef, mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit,
observed_otus, osd, pielou_e, robbins, shannon, simpson, simpson_e,
singles, strong)
class BaseTests(TestCase):
def setUp(self):
self.counts = np.array([0, 1, 1, 4, 2, 5, 2, 4, 1, 2])
self.b1 = np.array(
[[1, 3, 0, 1, 0],
[0, 2, 0, 4, 4],
[0, 0, 6, 2, 1],
[0, 0, 1, 1, 1]])
self.sids1 = list('ABCD')
self.oids1 = ['OTU%d' % i for i in range(1, 6)]
self.t1 = TreeNode.read(StringIO(
u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
u'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
self.t1_w_extra_tips = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
u')root;'))
def test_berger_parker_d(self):
self.assertEqual(berger_parker_d(np.array([5])), 1)
self.assertEqual(berger_parker_d(np.array([5, 5])), 0.5)
self.assertEqual(berger_parker_d(np.array([1, 1, 1, 1, 0])), 0.25)
self.assertEqual(berger_parker_d(self.counts), 5 / 22)
def test_brillouin_d(self):
self.assertAlmostEqual(brillouin_d(np.array([1, 2, 0, 0, 3, 1])),
0.86289353018248782)
def test_dominance(self):
self.assertEqual(dominance(np.array([5])), 1)
self.assertAlmostEqual(dominance(np.array([1, 0, 2, 5, 2])), 0.34)
def test_doubles(self):
self.assertEqual(doubles(self.counts), 3)
self.assertEqual(doubles(np.array([0, 3, 4])), 0)
self.assertEqual(doubles(np.array([2])), 1)
self.assertEqual(doubles(np.array([0, 0])), 0)
def test_enspie(self):
# Totally even community should have ENS_pie = number of OTUs.
self.assertAlmostEqual(enspie(np.array([1, 1, 1, 1, 1, 1])), 6)
self.assertAlmostEqual(enspie(np.array([13, 13, 13, 13])), 4)
# Hand calculated.
arr = np.array([1, 41, 0, 0, 12, 13])
exp = 1 / ((arr / arr.sum()) ** 2).sum()
self.assertAlmostEqual(enspie(arr), exp)
# Using dominance.
exp = 1 / dominance(arr)
self.assertAlmostEqual(enspie(arr), exp)
arr = np.array([1, 0, 2, 5, 2])
exp = 1 / dominance(arr)
self.assertAlmostEqual(enspie(arr), exp)
def test_esty_ci(self):
def _diversity(indices, f):
"""Calculate diversity index for each window of size 1.
indices: vector of indices of OTUs
f: f(counts) -> diversity measure
"""
result = []
max_size = max(indices) + 1
freqs = np.zeros(max_size, dtype=int)
for i in range(len(indices)):
freqs += np.bincount(indices[i:i + 1], minlength=max_size)
try:
curr = f(freqs)
except (ZeroDivisionError, FloatingPointError):
curr = 0
result.append(curr)
return np.array(result)
data = [1, 1, 2, 1, 1, 3, 2, 1, 3, 4]
observed_lower, observed_upper = zip(*_diversity(data, esty_ci))
expected_lower = np.array([1, -1.38590382, -0.73353593, -0.17434465,
-0.15060902, -0.04386191, -0.33042054,
-0.29041008, -0.43554755, -0.33385652])
expected_upper = np.array([1, 1.38590382, 1.40020259, 0.67434465,
0.55060902, 0.71052858, 0.61613483,
0.54041008, 0.43554755, 0.53385652])
npt.assert_array_almost_equal(observed_lower, expected_lower)
npt.assert_array_almost_equal(observed_upper, expected_upper)
def test_faith_pd_none_observed(self):
actual = faith_pd(np.array([], dtype=int), np.array([], dtype=int),
self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
actual = faith_pd([0, 0, 0, 0, 0], self.oids1, self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
def test_faith_pd_all_observed(self):
actual = faith_pd([1, 1, 1, 1, 1], self.oids1, self.t1)
expected = sum(n.length for n in self.t1.traverse()
if n.length is not None)
self.assertAlmostEqual(actual, expected)
actual = faith_pd([1, 2, 3, 4, 5], self.oids1, self.t1)
expected = sum(n.length for n in self.t1.traverse()
if n.length is not None)
self.assertAlmostEqual(actual, expected)
def test_faith_pd(self):
        # expected results derived from QIIME 1.9.1, which is a completely
        # different implementation from skbio's initial phylogenetic
        # diversity implementation
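        # (For b1[0] = [1, 3, 0, 1, 0] the observed OTUs are OTU1, OTU2 and
        # OTU4, so the observed branch lengths sum to
        # 0.5 + 0.5 + 0.5 + 1.0 + 0.0 + 0.75 + 1.25 + 0.0 = 4.5.)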
actual = faith_pd(self.b1[0], self.oids1, self.t1)
expected = 4.5
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[1], self.oids1, self.t1)
expected = 4.75
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[2], self.oids1, self.t1)
expected = 4.75
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[3], self.oids1, self.t1)
expected = 4.75
self.assertAlmostEqual(actual, expected)
def test_faith_pd_extra_tips(self):
# results are the same despite presences of unobserved tips in tree
actual = faith_pd(self.b1[0], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[0], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[1], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[1], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[2], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[2], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[3], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[3], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
def test_faith_pd_minimal_trees(self):
# expected values computed by hand
# zero tips
tree = TreeNode.read(StringIO(u'root;'))
actual = faith_pd(np.array([], dtype=int), [], tree)
expected = 0.0
self.assertEqual(actual, expected)
# two tips
tree = TreeNode.read(StringIO(u'(OTU1:0.25, OTU2:0.25)root;'))
actual = faith_pd([1, 0], ['OTU1', 'OTU2'], tree)
expected = 0.25
self.assertEqual(actual, expected)
def test_faith_pd_qiime_tiny_test(self):
# the following table and tree are derived from the QIIME 1.9.1
# "tiny-test" data
tt_table_fp = get_data_path(
os.path.join('qiime-191-tt', 'otu-table.tsv'), 'data')
tt_tree_fp = get_data_path(
os.path.join('qiime-191-tt', 'tree.nwk'), 'data')
self.q_table = pd.read_csv(tt_table_fp, sep='\t', skiprows=1,
index_col=0)
self.q_tree = TreeNode.read(tt_tree_fp)
expected_fp = get_data_path(
os.path.join('qiime-191-tt', 'faith-pd.txt'), 'data')
expected = pd.read_csv(expected_fp, sep='\t', index_col=0)
for sid in self.q_table.columns:
actual = faith_pd(self.q_table[sid], otu_ids=self.q_table.index,
tree=self.q_tree)
self.assertAlmostEqual(actual, expected['PD_whole_tree'][sid])
def test_faith_pd_root_not_observed(self):
# expected values computed by hand
tree = TreeNode.read(
StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
u'root;'))
otu_ids = ['OTU%d' % i for i in range(1, 5)]
# root node not observed, but branch between (OTU1, OTU2) and root
# is considered observed
actual = faith_pd([1, 1, 0, 0], otu_ids, tree)
expected = 0.6
self.assertAlmostEqual(actual, expected)
# root node not observed, but branch between (OTU3, OTU4) and root
# is considered observed
actual = faith_pd([0, 0, 1, 1], otu_ids, tree)
expected = 2.3
self.assertAlmostEqual(actual, expected)
def test_faith_pd_invalid_input(self):
# Many of these tests are duplicated from
# skbio.diversity.tests.test_base, but I think it's important to
# confirm that they are being run when faith_pd is called.
# tree has duplicated tip ids
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU2:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(DuplicateNodeError, faith_pd, counts, otu_ids, t)
# unrooted tree as input
t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
u'OTU4:0.7);'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# otu_ids has duplicated ids
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU2']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# len of vectors not equal
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# negative counts
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, -3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# tree with no branch lengths
t = TreeNode.read(
StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# tree missing some branch lengths
t = TreeNode.read(
StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# otu_ids not present in tree
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU42']
self.assertRaises(MissingNodeError, faith_pd, counts, otu_ids, t)
def test_fisher_alpha(self):
exp = 2.7823795367398798
arr = np.array([4, 3, 4, 0, 1, 0, 2])
obs = fisher_alpha(arr)
self.assertAlmostEqual(obs, exp)
# Should depend only on S and N (number of OTUs, number of
# individuals / seqs), so we should obtain the same output as above.
obs = fisher_alpha([1, 6, 1, 0, 1, 0, 5])
self.assertAlmostEqual(obs, exp)
# Should match another by hand:
# 2 OTUs, 62 seqs, alpha is 0.39509
obs = fisher_alpha([61, 0, 0, 1])
self.assertAlmostEqual(obs, 0.39509, delta=0.0001)
# Test case where we have >1000 individuals (SDR-IV makes note of this
# case). Verified against R's vegan::fisher.alpha.
obs = fisher_alpha([999, 0, 10])
self.assertAlmostEqual(obs, 0.2396492)
def test_goods_coverage(self):
counts = [1] * 75 + [2, 2, 2, 2, 2, 2, 3, 4, 4]
obs = goods_coverage(counts)
self.assertAlmostEqual(obs, 0.23469387755)
def test_heip_e(self):
# Calculate "by hand".
arr = np.array([1, 2, 3, 1])
h = shannon(arr, base=np.e)
expected = (np.exp(h) - 1) / 3
self.assertEqual(heip_e(arr), expected)
# From Statistical Ecology: A Primer in Methods and Computing, page 94,
# table 8.1.
self.assertAlmostEqual(heip_e([500, 300, 200]), 0.90, places=2)
self.assertAlmostEqual(heip_e([500, 299, 200, 1]), 0.61, places=2)
def test_kempton_taylor_q(self):
# Approximate Magurran 1998 calculation p143.
arr = np.array([2, 3, 3, 3, 3, 3, 4, 4, 4, 6, 6, 7, 7, 9, 9, 11, 14,
15, 15, 20, 29, 33, 34, 36, 37, 53, 57, 138, 146, 170])
exp = 14 / np.log(34 / 4)
self.assertAlmostEqual(kempton_taylor_q(arr), exp)
# Should get same answer regardless of input order.
np.random.shuffle(arr)
self.assertAlmostEqual(kempton_taylor_q(arr), exp)
def test_margalef(self):
self.assertEqual(margalef(self.counts), 8 / np.log(22))
def test_mcintosh_d(self):
self.assertAlmostEqual(mcintosh_d(np.array([1, 2, 3])),
0.636061424871458)
def test_mcintosh_e(self):
num = np.sqrt(15)
den = np.sqrt(19)
exp = num / den
self.assertEqual(mcintosh_e(np.array([1, 2, 3, 1])), exp)
def test_menhinick(self):
# observed_otus = 9, total # of individuals = 22
self.assertEqual(menhinick(self.counts), 9 / np.sqrt(22))
def test_michaelis_menten_fit(self):
obs = michaelis_menten_fit([22])
self.assertAlmostEqual(obs, 1.0)
obs = michaelis_menten_fit([42])
self.assertAlmostEqual(obs, 1.0)
obs = michaelis_menten_fit([34], num_repeats=3, params_guess=(13, 13))
self.assertAlmostEqual(obs, 1.0)
obs = michaelis_menten_fit([70, 70], num_repeats=5)
self.assertAlmostEqual(obs, 2.0, places=1)
obs_few = michaelis_menten_fit(np.arange(4) * 2, num_repeats=10)
obs_many = michaelis_menten_fit(np.arange(4) * 100, num_repeats=10)
# [0,100,200,300] looks like only 3 OTUs.
self.assertAlmostEqual(obs_many, 3.0, places=1)
# [0,2,4,6] looks like 3 OTUs with maybe more to be found.
self.assertTrue(obs_few > obs_many)
def test_observed_otus(self):
obs = observed_otus(np.array([4, 3, 4, 0, 1, 0, 2]))
self.assertEqual(obs, 5)
obs = observed_otus(np.array([0, 0, 0]))
self.assertEqual(obs, 0)
obs = observed_otus(self.counts)
self.assertEqual(obs, 9)
def test_osd(self):
self.assertEqual(osd(self.counts), (9, 3, 3))
def test_pielou_e(self):
# Calculate "by hand".
arr = np.array([1, 2, 3, 1])
h = shannon(arr, np.e)
s = 4
expected = h / np.log(s)
self.assertAlmostEqual(pielou_e(arr), expected)
self.assertAlmostEqual(pielou_e(self.counts), 0.92485490560)
self.assertEqual(pielou_e([1, 1]), 1.0)
self.assertEqual(pielou_e([1, 1, 1, 1]), 1.0)
self.assertEqual(pielou_e([1, 1, 1, 1, 0, 0]), 1.0)
# Examples from
# http://ww2.mdsg.umd.edu/interactive_lessons/biofilm/diverse.htm#3
self.assertAlmostEqual(pielou_e([1, 1, 196, 1, 1]), 0.078, 3)
self.assertTrue(np.isnan(pielou_e([0, 0, 200, 0, 0])))
self.assertTrue(np.isnan(pielou_e([0, 0, 0, 0, 0])))
def test_robbins(self):
self.assertEqual(robbins(np.array([1, 2, 3, 0, 1])), 2 / 7)
def test_shannon(self):
self.assertEqual(shannon(np.array([5])), 0)
self.assertEqual(shannon(np.array([5, 5])), 1)
self.assertEqual(shannon(np.array([1, 1, 1, 1, 0])), 2)
def test_simpson(self):
self.assertAlmostEqual(simpson(np.array([1, 0, 2, 5, 2])), 0.66)
self.assertAlmostEqual(simpson(np.array([5])), 0)
def test_simpson_e(self):
# A totally even community should have simpson_e = 1.
self.assertEqual(simpson_e(np.array([1, 1, 1, 1, 1, 1, 1])), 1)
arr = np.array([0, 30, 25, 40, 0, 0, 5])
freq_arr = arr / arr.sum()
D = (freq_arr ** 2).sum()
exp = 1 / (D * 4)
obs = simpson_e(arr)
self.assertEqual(obs, exp)
# From:
# https://groups.nceas.ucsb.edu/sun/meetings/calculating-evenness-
# of-habitat-distributions
arr = np.array([500, 400, 600, 500])
D = 0.0625 + 0.04 + 0.09 + 0.0625
exp = 1 / (D * 4)
self.assertEqual(simpson_e(arr), exp)
def test_singles(self):
self.assertEqual(singles(self.counts), 3)
self.assertEqual(singles(np.array([0, 3, 4])), 0)
self.assertEqual(singles(np.array([1])), 1)
self.assertEqual(singles(np.array([0, 0])), 0)
def test_strong(self):
self.assertAlmostEqual(strong(np.array([1, 2, 3, 1])), 0.214285714)
if __name__ == '__main__':
main()
|
bsd-3-clause
|
ivano666/tensorflow
|
tensorflow/contrib/learn/python/learn/tests/test_estimators.py
|
7
|
2438
|
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
class CustomOptimizer(tf.test.TestCase):
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
# setup exponential decay function
def exp_decay(global_step):
return tf.train.exponential_decay(learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
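    # Note: with the default staircase=False, exponential_decay returns
    # learning_rate * decay_rate ** (global_step / decay_steps), so the
    # learning rate decays smoothly from 0.1 as training proceeds.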
def custom_optimizer(learning_rate):
return tf.train.MomentumOptimizer(learning_rate, 0.9)
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
steps=400,
learning_rate=exp_decay,
optimizer=custom_optimizer)
classifier.fit(X_train, y_train)
score = accuracy_score(y_test, classifier.predict(X_test))
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
|
jungla/ICOM-fluidity-toolbox
|
Detectors/offline_advection/advect_particles_C_2Db_big.py
|
1
|
4096
|
import os, sys
import myfun
import numpy as np
import lagrangian_stats
import scipy.interpolate as interpolate
import csv
import matplotlib.pyplot as plt
import advect_functions
import fio
from intergrid import Intergrid
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
#label = 'm_25_2_512'
label = 'm_25_1b_particles'
dayi = 60 #10*24*1
dayf = 500 #10*24*4
days = 1
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])
path = '../../2D/U/Velocity_CG/'
time = range(dayi,dayf,days)
# dimensions archives
# ML exp
#Xlist = np.linspace(0,10000,801)
#Ylist = np.linspace(0,4000,321)
Xlist = np.linspace(0,8000,641)
Ylist = np.linspace(0,8000,641)
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = 1.*np.cumsum(dl)
maps = [Xlist,Ylist,Zlist]
lo = np.array([ 0, 0, 0])
hi = np.array([ 8000, 8000, 50]) # highest lat, highest lon
#lo = np.array([ 0, 0, 0])
#hi = np.array([ 10000, 4000, 50]) # highest lat, highest lon
[X,Y,Z] = myfun.meshgrid2(Xlist,Ylist,Zlist)
Y = np.reshape(Y,(np.size(Y),))
X = np.reshape(X,(np.size(X),))
Z = np.reshape(Z,(np.size(Z),))
xn = len(Xlist)
yn = len(Ylist)
zn = len(Zlist)
dx = np.gradient(Xlist)
dy = np.gradient(Ylist)
dz = np.gradient(Zlist)
#dt = 360
dt = 1440
time = np.asarray(range(dayi,dayf,days))
print time[0]
# initial particles position
x0 = range(0,5010,10)
y0 = range(0,5010,10)
#z0 = [5,10,17]
#x0 = range(3000,4010,10)
#y0 = range(2000,3010,10)
#z0 = range(1,20,4)
z0 = [0,5,10,15]
xp = len(x0)
yp = len(y0)
zp = len(z0)
pt = xp*yp*zp
[z0,y0,x0] = myfun.meshgrid2(z0,y0,x0)
x0 = np.reshape(x0, (np.size(x0)))
y0 = np.reshape(y0, (np.size(y0)))
z0 = np.reshape(z0, (np.size(z0)))
#levels = np.zeros(x0.shape) + 1.
#levels[np.where(z0 != 2)] = np.nan
#x0 = lo[0] + np.random.uniform( size=(pt) ) * (hi[0] - lo[0])
#y0 = lo[1] + np.random.uniform( size=(pt) ) * (hi[1] - lo[1])
#z0 = lo[2] + np.random.uniform( size=(pt) ) * (hi[2] - lo[2])
#z0 = z0*0-1.
x = np.zeros((pt))
y = np.zeros((pt))
z = np.zeros((pt))
## ADVECT PARTICLES
kick = 5.
#filename = './traj_'+label+'_'+str(dayi)+'_'+str(dayf)+'_3D.csv'
filename = './traj_'+label+'_'+str(dayi)+'_'+str(dayf)+'_2D_big.csv'
print filename
fd = open(filename,'wb')
for p in range(pt):
fd.write(str(x0[p])+', '+str(y0[p])+', '+str(-1.*z0[p])+', '+str(time[0])+'\n')
import random
for t in range(len(time)-1):
print 'time:', time[t]
file0 = path+'Velocity_CG_0_'+label+'_'+str(time[t])+'.csv'
file1 = path+'Velocity_CG_1_'+label+'_'+str(time[t])+'.csv'
file2 = path+'Velocity_CG_2_'+label+'_'+str(time[t])+'.csv'
Ut0 = fio.read_Scalar(file0,xn,yn,zn)
Vt0 = fio.read_Scalar(file1,xn,yn,zn)
Wt0 = 0*Ut0 #-1.*fio.read_Scalar(file2,xn,yn,zn) #0*Ut0
file0 = path+'Velocity_CG_0_'+label+'_'+str(time[t+1])+'.csv'
file1 = path+'Velocity_CG_1_'+label+'_'+str(time[t+1])+'.csv'
file2 = path+'Velocity_CG_2_'+label+'_'+str(time[t+1])+'.csv'
Ut1 = fio.read_Scalar(file0,xn,yn,zn)
Vt1 = fio.read_Scalar(file1,xn,yn,zn)
Wt1 = 0*Ut0 #-1.*fio.read_Scalar(file2,xn,yn,zn) #0*Ut0
# subcycling
nt = 20
ds = 1.*dt / nt
# for st in range(nt+1):
# print st
# Us0 = (Ut1*st + Ut0*(nt-st))/(nt)
# Us1 = (Ut1*(st+1) + Ut0*(nt-st-1))/(nt)
# Vs0 = (Vt1*st + Vt0*(nt-st))/(nt)
# Vs1 = (Vt1*(st+1) + Vt0*(nt-st-1))/(nt)
# Ws0 = (Wt1*st + Wt0*(nt-st))/(nt)
# Ws1 = (Wt1*(st+1) + Wt0*(nt-st-1))/(nt)
# x0,y0,z0 = advect_functions.RK4(x0,y0,z0,Us0,Vs0,Ws0,Us1,Vs1,Ws1,lo,hi,maps,ds)
x0,y0,z0 = advect_functions.RK4(x0,y0,z0,Ut0,Vt0,Wt0,Ut1,Vt1,Wt1,lo,hi,maps,dt)
#x0,y0,z0 = advect_functions.EULER(x0,y0,z0,Ut0,Vt0,Wt0,lo,hi,maps,dt)
# random.seed()
# random kick
# for i in range(len(x0)):
# x0[i] = x0[i] + random.uniform(-kick,kick)
# y0[i] = y0[i] + random.uniform(-kick,kick)
x0,y0,z0 = advect_functions.pBC(x0,y0,z0,lo,hi)
# x1,y1,z1 = x0,y0,z0
# write
for p in range(pt):
fd.write(str(x0[p])+', '+str(y0[p])+', '+str(-1.*z0[p])+', '+str(time[t+1])+'\n')
fd.close()
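# advect_functions.RK4 is not shown in this script. As a rough sketch (an
# assumption about the scheme, not the project's implementation), a classical
# RK4 step for dx/dt = u(x, t) with only two velocity snapshots -- u0 at time
# t and u1 at time t + dt -- treats the field as linear in time, so the
# mid-step velocity is approximated by the average of the two snapshots.
def rk4_positions(p, u0, u1, dt):
    # p: (N, 3) array of particle positions
    # u0, u1: callables returning (N, 3) velocities interpolated at positions
    k1 = u0(p)
    k2 = 0.5 * (u0(p + 0.5 * dt * k1) + u1(p + 0.5 * dt * k1))
    k3 = 0.5 * (u0(p + 0.5 * dt * k2) + u1(p + 0.5 * dt * k2))
    k4 = u1(p + dt * k3)
    return p + (dt / 6.0) * (k1 + 2.0 * k2 + 2.0 * k3 + k4)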
|
gpl-2.0
|
rth/PyKrige
|
pykrige/uk3d.py
|
1
|
44753
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import scipy.linalg
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from . import variogram_models
from . import core
from .core import _adjust_for_anisotropy, _initialize_variogram_model, \
_make_variogram_parameter_list, _find_statistics
import warnings
__doc__ = """
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
[email protected]
Summary
-------
Contains class UniversalKriging3D.
References
----------
.. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
Copyright (c) 2015-2018, PyKrige Developers
"""
class UniversalKriging3D:
"""Three-dimensional universal kriging
Parameters
----------
x : array_like
X-coordinates of data points.
y : array_like
Y-coordinates of data points.
z : array_like
Z-coordinates of data points.
val : array_like
Values at data points.
variogram_model : str, optional
Specified which variogram model to use; may be one of the following:
linear, power, gaussian, spherical, exponential, hole-effect.
Default is linear variogram model. To utilize a custom variogram model,
specify 'custom'; you must also provide variogram_parameters and
variogram_function. Note that the hole-effect model is only
technically correct for one-dimensional problems.
variogram_parameters : list or dict, optional
Parameters that define the specified variogram model. If not provided,
parameters will be automatically calculated using a "soft" L1 norm
minimization scheme. For variogram model parameters provided in a dict,
the required dict keys vary according to the specified variogram
model: ::
linear - {'slope': slope, 'nugget': nugget}
power - {'scale': scale, 'exponent': exponent, 'nugget': nugget}
gaussian - {'sill': s, 'range': r, 'nugget': n}
OR
{'psill': p, 'range': r, 'nugget':n}
spherical - {'sill': s, 'range': r, 'nugget': n}
OR
{'psill': p, 'range': r, 'nugget':n}
exponential - {'sill': s, 'range': r, 'nugget': n}
OR
{'psill': p, 'range': r, 'nugget':n}
hole-effect - {'sill': s, 'range': r, 'nugget': n}
OR
{'psill': p, 'range': r, 'nugget':n}
Note that either the full sill or the partial sill
(psill = sill - nugget) can be specified in the dict.
For variogram model parameters provided in a list, the entries
must be as follows: ::
linear - [slope, nugget]
power - [scale, exponent, nugget]
gaussian - [sill, range, nugget]
spherical - [sill, range, nugget]
exponential - [sill, range, nugget]
hole-effect - [sill, range, nugget]
Note that the full sill (NOT the partial sill) must be specified
in the list format.
For a custom variogram model, the parameters are required, as custom
variogram models will not automatically be fit to the data.
Furthermore, the parameters must be specified in list format, in the
order in which they are used in the callable function (see
variogram_function for more information). The code does not check
that the provided list contains the appropriate number of parameters
for the custom variogram model, so an incorrect parameter list in
such a case will probably trigger an esoteric exception someplace
deep in the code.
NOTE that, while the list format expects the full sill, the code
itself works internally with the partial sill.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. The function must take only two arguments:
first, a list of parameters for the variogram model;
second, the distances at which to calculate the variogram model.
The list provided in variogram_parameters will be passed to the
function as the first argument.
nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : bool, optional
Flag that specifies if semivariance at smaller lags should be weighted
more heavily when automatically calculating variogram model.
The routine is currently hard-coded such that the weights are
calculated from a logistic function, so weights at small lags are ~1
and weights at the longest lags are ~0; the center of the logistic
weighting is hard-coded to be at 70% of the distance from the shortest
lag to the largest lag. Setting this parameter to True indicates that
weights will be applied. Default is False.
(Kitanidis suggests that the values at smaller lags are more
important in fitting a variogram model, so the option is provided
to enable such weighting.)
anisotropy_scaling_y : float, optional
Scalar stretching value to take into account anisotropy in
the y direction. Default is 1 (effectively no stretching).
Scaling is applied in the y direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z,
if anisotropy_angle_x/y/z is/are not 0).
anisotropy_scaling_z : float, optional
Scalar stretching value to take into account anisotropy in
the z direction. Default is 1 (effectively no stretching).
Scaling is applied in the z direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z,
if anisotropy_angle_x/y/z is/are not 0).
anisotropy_angle_x : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the x axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
anisotropy_angle_y : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the y axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
anisotropy_angle_z : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the z axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
drift_terms : list of strings, optional
List of drift terms to include in three-dimensional universal kriging.
Supported drift terms are currently 'regional_linear', 'specified',
and 'functional'.
specified_drift : list of array-like objects, optional
List of arrays that contain the drift values at data points.
The arrays must be shape (N,) or (N, 1), where N is the number of
data points. Any number of specified-drift terms may be used.
functional_drift : list of callable objects, optional
List of callable functions that will be used to evaluate drift terms.
The function must be a function of only the three spatial coordinates
and must return a single value for each coordinate triplet.
It must be set up to be called with only three arguments,
first an array of x values, the second an array of y values,
and the third an array of z values. If the problem involves anisotropy,
the drift values are calculated in the adjusted data frame.
verbose : boolean, optional
Enables program text output to monitor kriging process.
Default is False (off).
enable_plotting : boolean, optional
Enables plotting to display variogram. Default is False (off).
References
----------
    .. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
"""
UNBIAS = True # This can be changed to remove the unbiasedness condition
# Really for testing purposes only...
eps = 1.e-10 # Cutoff for comparison to zero
variogram_dict = {'linear': variogram_models.linear_variogram_model,
'power': variogram_models.power_variogram_model,
'gaussian': variogram_models.gaussian_variogram_model,
'spherical': variogram_models.spherical_variogram_model,
'exponential': variogram_models.exponential_variogram_model,
'hole-effect': variogram_models.hole_effect_variogram_model}
def __init__(self, x, y, z, val, variogram_model='linear',
variogram_parameters=None, variogram_function=None, nlags=6,
weight=False, anisotropy_scaling_y=1., anisotropy_scaling_z=1.,
anisotropy_angle_x=0., anisotropy_angle_y=0.,
anisotropy_angle_z=0., drift_terms=None, specified_drift=None,
functional_drift=None, verbose=False, enable_plotting=False):
# Deal with mutable default argument
if drift_terms is None:
drift_terms = []
if specified_drift is None:
specified_drift = []
if functional_drift is None:
functional_drift = []
# Code assumes 1D input arrays. Ensures that any extraneous dimensions
# don't get in the way. Copies are created to avoid any problems with
# referencing the original passed arguments.
self.X_ORIG = \
np.atleast_1d(np.squeeze(np.array(x, copy=True, dtype=np.float64)))
self.Y_ORIG = \
np.atleast_1d(np.squeeze(np.array(y, copy=True, dtype=np.float64)))
self.Z_ORIG = \
np.atleast_1d(np.squeeze(np.array(z, copy=True, dtype=np.float64)))
self.VALUES = \
np.atleast_1d(np.squeeze(np.array(val, copy=True, dtype=np.float64)))
self.verbose = verbose
self.enable_plotting = enable_plotting
if self.enable_plotting and self.verbose:
print("Plotting Enabled\n")
self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG))/2.0
self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG))/2.0
self.ZCENTER = (np.amax(self.Z_ORIG) + np.amin(self.Z_ORIG))/2.0
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
if self.verbose:
print("Adjusting data for anisotropy...")
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = \
_adjust_for_anisotropy(np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z]).T
# set up variogram...
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for "
"custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print("Initializing variogram model...")
vp_temp = _make_variogram_parameter_list(self.variogram_model,
variogram_parameters)
self.lags, self.semivariance, self.variogram_model_parameters = \
_initialize_variogram_model(np.vstack((self.X_ADJUSTED,
self.Y_ADJUSTED,
self.Z_ADJUSTED)).T,
self.VALUES, self.variogram_model,
vp_temp, self.variogram_function,
nlags, weight, 'euclidean')
if self.verbose:
if self.variogram_model == 'linear':
print("Using '%s' Variogram Model" % 'linear')
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], '\n')
elif self.variogram_model == 'power':
print("Using '%s' Variogram Model" % 'power')
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], '\n')
elif self.variogram_model == 'custom':
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print("Full Sill:", self.variogram_model_parameters[0] +
self.variogram_model_parameters[2])
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], '\n')
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = \
_find_statistics(np.vstack((self.X_ADJUSTED,
self.Y_ADJUSTED,
self.Z_ADJUSTED)).T,
self.VALUES, self.variogram_function,
self.variogram_model_parameters, 'euclidean')
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, '\n')
if self.verbose:
print("Initializing drift terms...")
# Note that the regional linear drift values will be based on the
# adjusted coordinate system. Really, it doesn't actually matter
# which coordinate system is used here.
if 'regional_linear' in drift_terms:
self.regional_linear_drift = True
if self.verbose:
print("Implementing regional linear drift.")
else:
self.regional_linear_drift = False
if 'specified' in drift_terms:
if type(specified_drift) is not list:
raise TypeError("Arrays for specified drift terms must be "
"encapsulated in a list.")
if len(specified_drift) == 0:
raise ValueError("Must provide at least one drift-value array "
"when using the 'specified' drift capability.")
self.specified_drift = True
self.specified_drift_data_arrays = []
for term in specified_drift:
specified = np.squeeze(np.array(term, copy=True))
if specified.size != self.X_ORIG.size:
raise ValueError("Must specify the drift values for each "
"data point when using the "
"'specified' drift capability.")
self.specified_drift_data_arrays.append(specified)
else:
self.specified_drift = False
# The provided callable functions will be evaluated using
# the adjusted coordinates.
if 'functional' in drift_terms:
if type(functional_drift) is not list:
raise TypeError("Callables for functional drift terms must "
"be encapsulated in a list.")
if len(functional_drift) == 0:
raise ValueError("Must provide at least one callable object "
"when using the 'functional' drift capability.")
self.functional_drift = True
self.functional_drift_terms = functional_drift
else:
self.functional_drift = False
def update_variogram_model(self, variogram_model, variogram_parameters=None,
variogram_function=None, nlags=6, weight=False,
anisotropy_scaling_y=1., anisotropy_scaling_z=1.,
anisotropy_angle_x=0., anisotropy_angle_y=0.,
anisotropy_angle_z=0.):
"""Changes the variogram model and variogram parameters
for the kriging system.
Parameters
----------
variogram_model : str
May be any of the variogram models listed above.
May also be 'custom', in which case variogram_parameters and
variogram_function must be specified.
variogram_parameters : list or dict, optional
List or dict of variogram model parameters, as explained above.
If not provided, a best fit model will be calculated as
described above.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. See above for more information.
        nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : boolean, optional
Flag that specifies if semivariance at smaller lags should be
weighted more heavily when automatically calculating variogram
model. See above for more information. True indicates that
weights will be applied. Default is False.
anisotropy_scaling_y : float, optional
Scalar stretching value to take into account anisotropy
in y-direction. Default is 1 (effectively no stretching).
See above for more information.
anisotropy_scaling_z : float, optional
Scalar stretching value to take into account anisotropy
in z-direction. Default is 1 (effectively no stretching).
See above for more information.
anisotropy_angle_x : float, optional
Angle (in degrees) by which to rotate coordinate system about
the x axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
anisotropy_angle_y : float, optional
Angle (in degrees) by which to rotate coordinate system about
the y axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
anisotropy_angle_z : float, optional
Angle (in degrees) by which to rotate coordinate system about
the z axis in order to take into account anisotropy.
Default is 0 (no rotation).
See above for more information.
"""
if anisotropy_scaling_y != self.anisotropy_scaling_y or \
anisotropy_scaling_z != self.anisotropy_scaling_z or \
anisotropy_angle_x != self.anisotropy_angle_x or \
anisotropy_angle_y != self.anisotropy_angle_y or \
anisotropy_angle_z != self.anisotropy_angle_z:
if self.verbose:
print("Adjusting data for anisotropy...")
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = \
_adjust_for_anisotropy(np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z]).T
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' "
"is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for "
"custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print("Updating variogram mode...")
vp_temp = _make_variogram_parameter_list(self.variogram_model,
variogram_parameters)
self.lags, self.semivariance, self.variogram_model_parameters = \
_initialize_variogram_model(np.vstack((self.X_ADJUSTED,
self.Y_ADJUSTED,
self.Z_ADJUSTED)).T,
self.VALUES, self.variogram_model,
vp_temp, self.variogram_function,
nlags, weight, 'euclidean')
if self.verbose:
if self.variogram_model == 'linear':
print("Using '%s' Variogram Model" % 'linear')
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], '\n')
elif self.variogram_model == 'power':
print("Using '%s' Variogram Model" % 'power')
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], '\n')
elif self.variogram_model == 'custom':
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print("Full Sill:", self.variogram_model_parameters[0] +
self.variogram_model_parameters[2])
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], '\n')
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = \
_find_statistics(np.vstack((self.X_ADJUSTED,
self.Y_ADJUSTED,
self.Z_ADJUSTED)).T,
self.VALUES, self.variogram_function,
self.variogram_model_parameters, 'euclidean')
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, '\n')
def display_variogram_model(self):
"""Displays semivariogram and variogram model."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags), 'k-')
plt.show()
def switch_verbose(self):
"""Enables/disables program text output. No arguments."""
self.verbose = not self.verbose
def switch_plotting(self):
"""Enables/disable variogram plot display. No arguments."""
self.enable_plotting = not self.enable_plotting
def get_epsilon_residuals(self):
"""Returns the epsilon residuals for the variogram fit. No arguments."""
return self.epsilon
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit. No arguments."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show()
def get_statistics(self):
"""Returns the Q1, Q2, and cR statistics for the
variogram fit (in that order). No arguments.
"""
return self.Q1, self.Q2, self.cR
def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
def _get_kriging_matrix(self, n, n_withdrifts):
"""Assembles the kriging matrix."""
xyz = np.concatenate((self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis],
self.Z_ADJUSTED[:, np.newaxis]), axis=1)
d = cdist(xyz, xyz, 'euclidean')
if self.UNBIAS:
a = np.zeros((n_withdrifts+1, n_withdrifts+1))
else:
a = np.zeros((n_withdrifts, n_withdrifts))
a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.)
i = n
if self.regional_linear_drift:
a[:n, i] = self.X_ADJUSTED
a[i, :n] = self.X_ADJUSTED
i += 1
a[:n, i] = self.Y_ADJUSTED
a[i, :n] = self.Y_ADJUSTED
i += 1
a[:n, i] = self.Z_ADJUSTED
a[i, :n] = self.Z_ADJUSTED
i += 1
if self.specified_drift:
for arr in self.specified_drift_data_arrays:
a[:n, i] = arr
a[i, :n] = arr
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
a[:n, i] = func(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)
a[i, :n] = func(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)
i += 1
if i != n_withdrifts:
warnings.warn("Error in creating kriging matrix. Kriging may fail.", RuntimeWarning)
if self.UNBIAS:
a[n_withdrifts, :n] = 1.0
a[:n, n_withdrifts] = 1.0
a[n:n_withdrifts + 1, n:n_withdrifts + 1] = 0.0
return a
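    # Note (added for clarity): when UNBIAS is True, the system assembled above
    # has the block structure
    #     [ -gamma(d_ij)   F    1 ]
    #     [     F^T        0    0 ]
    #     [     1^T        0    0 ]
    # where gamma is the variogram evaluated on the pairwise data distances,
    # F holds the drift terms (regional linear, specified, and/or functional)
    # evaluated at the data points, and the final row/column enforces that the
    # kriging weights sum to one.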
def _exec_vector(self, a, bd, xyz, mask, n_withdrifts, spec_drift_grids):
"""Solves the kriging system as a vectorized operation. This method
can take a lot of memory for large grids and/or large datasets."""
npt = bd.shape[0]
n = self.X_ADJUSTED.shape[0]
zero_index = None
zero_value = False
a_inv = scipy.linalg.inv(a)
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
if self.UNBIAS:
b = np.zeros((npt, n_withdrifts+1, 1))
else:
b = np.zeros((npt, n_withdrifts, 1))
b[:, :n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], zero_index[1], 0] = 0.0
i = n
if self.regional_linear_drift:
b[:, i, 0] = xyz[:, 2]
i += 1
b[:, i, 0] = xyz[:, 1]
i += 1
b[:, i, 0] = xyz[:, 0]
i += 1
if self.specified_drift:
for spec_vals in spec_drift_grids:
b[:, i, 0] = spec_vals.flatten()
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
b[:, i, 0] = func(xyz[:, 2], xyz[:, 1], xyz[:, 0])
i += 1
if i != n_withdrifts:
warnings.warn("Error in setting up kriging system. "
"Kriging may fail.", RuntimeWarning)
if self.UNBIAS:
b[:, n_withdrifts, 0] = 1.0
if (~mask).any():
mask_b = np.repeat(mask[:, np.newaxis, np.newaxis],
n_withdrifts+1, axis=1)
b = np.ma.array(b, mask=mask_b)
if self.UNBIAS:
x = np.dot(a_inv, b.reshape((npt, n_withdrifts+1)).T).reshape((1, n_withdrifts+1, npt)).T
else:
x = np.dot(a_inv, b.reshape((npt, n_withdrifts)).T).reshape((1, n_withdrifts, npt)).T
kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1)
sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)
return kvalues, sigmasq
def _exec_loop(self, a, bd_all, xyz, mask, n_withdrifts, spec_drift_grids):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
a_inv = scipy.linalg.inv(a)
for j in np.nonzero(~mask)[0]: # Note that this is the same thing as range(npt) if mask is not defined,
bd = bd_all[j] # otherwise it takes the non-masked elements.
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
if self.UNBIAS:
b = np.zeros((n_withdrifts+1, 1))
else:
b = np.zeros((n_withdrifts, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
i = n
if self.regional_linear_drift:
b[i, 0] = xyz[j, 2]
i += 1
b[i, 0] = xyz[j, 1]
i += 1
b[i, 0] = xyz[j, 0]
i += 1
if self.specified_drift:
for spec_vals in spec_drift_grids:
                    b[i, 0] = spec_vals.flatten()[j]  # index by point j being solved, not by matrix row i
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
b[i, 0] = func(xyz[j, 2], xyz[j, 1], xyz[j, 0])
i += 1
if i != n_withdrifts:
warnings.warn("Error in setting up kriging system. "
"Kriging may fail.", RuntimeWarning)
if self.UNBIAS:
b[n_withdrifts, 0] = 1.0
x = np.dot(a_inv, b)
kvalues[j] = np.sum(x[:n, 0] * self.VALUES)
sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])
return kvalues, sigmasq
def execute(self, style, xpoints, ypoints, zpoints, mask=None,
backend='vectorized', specified_drift_arrays=None):
"""Calculates a kriged grid and the associated variance.
This is now the method that performs the main kriging calculation.
Note that currently measurements (i.e., z values) are
considered 'exact'. This means that, when a specified coordinate for
interpolation is exactly the same as one of the data points,
        the variogram evaluated at the point is forced to be zero. Also, the
        diagonal of the kriging matrix is always forced to be zero.
In forcing the variogram evaluated at data points to be zero, we are
effectively saying that there is no variance at that point
(no uncertainty, so the value is 'exact').
In the future, the code may include an extra 'exact_values' boolean
flag that can be adjusted to specify whether to treat the measurements
as 'exact'. Setting the flag to false would indicate that the variogram
should not be forced to be zero at zero distance (i.e., when evaluated
at data points). Instead, the uncertainty in the point will be equal
to the nugget. This would mean that the diagonal of the kriging matrix
would be set to the nugget instead of to zero.
Parameters
----------
style : str
Specifies how to treat input kriging points. Specifying 'grid'
treats xpoints, ypoints, and zpoints as arrays of x, y, and z
coordinates that define a rectangular grid. Specifying 'points'
treats xpoints, ypoints, and zpoints as arrays that provide
coordinates at which to solve the kriging system. Specifying
'masked' treats xpoints, ypoints, and zpoints as arrays of x, y,
and z coordinates that define a rectangular grid and uses mask
to only evaluate specific points in the grid.
xpoints : array_like, shape (N,) or (N, 1)
            If style is specified as 'grid' or 'masked', x-coordinates of
LxMxN grid. If style is specified as 'points', x-coordinates of
specific points at which to solve kriging system.
ypoints : array_like, shape (M,) or (M, 1)
If style is specified as 'grid' or 'masked', y-coordinates of
LxMxN grid. If style is specified as 'points', y-coordinates of
specific points at which to solve kriging system. Note that in this
case, xpoints, ypoints, and zpoints must have the same dimensions
(i.e., L = M = N).
zpoints : array_like, shape (L,) or (L, 1)
If style is specified as 'grid' or 'masked', z-coordinates of
LxMxN grid. If style is specified as 'points', z-coordinates of
specific points at which to solve kriging system. Note that in this
case, xpoints, ypoints, and zpoints must have the same dimensions
(i.e., L = M = N).
mask : boolean array, shape (L, M, N), optional
Specifies the points in the rectangular grid defined by xpoints,
ypoints, zpoints that are to be excluded in the kriging
calculations. Must be provided if style is specified as 'masked'.
False indicates that the point should not be masked, so the kriging
system will be solved at the point.
True indicates that the point should be masked, so the kriging
system will not be solved at the point.
backend : string, optional
Specifies which approach to use in kriging. Specifying 'vectorized'
will solve the entire kriging problem at once in a vectorized
operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging
system is to be solved. This approach is slower but also less
memory-intensive. Default is 'vectorized'.
specified_drift_arrays : list of array-like objects, optional
Specifies the drift values at the points at which the kriging
system is to be evaluated. Required if 'specified' drift provided
in the list of drift terms when instantiating the UniversalKriging3D
class. Must be a list of arrays in the same order as the list
provided when instantiating the kriging object. Array(s) must be
the same dimension as the specified grid or have the same number
of points as the specified points; i.e., the arrays either must be
shape (L, M, N), where L is the number of z grid-points,
M is the number of y grid-points, and N is the number of
x grid-points, or shape (N,) or (N, 1), where N is the number of
points at which to evaluate the kriging system.
Returns
-------
kvalues : ndarray, shape (L, M, N) or (N,) or (N, 1)
Interpolated values of specified grid or at the specified set
of points. If style was specified as 'masked', kvalues will be a
numpy masked array.
sigmasq : ndarray, shape (L, M, N) or (N,) or (N, 1)
Variance at specified grid points or at the specified set of points.
If style was specified as 'masked', sigmasq will be a numpy
masked array.
"""
if self.verbose:
            print("Executing Universal Kriging...\n")
if style != 'grid' and style != 'masked' and style != 'points':
raise ValueError("style argument must be 'grid', 'points', "
"or 'masked'")
xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True)))
n = self.X_ADJUSTED.shape[0]
n_withdrifts = n
if self.regional_linear_drift:
n_withdrifts += 3
if self.specified_drift:
n_withdrifts += len(self.specified_drift_data_arrays)
if self.functional_drift:
n_withdrifts += len(self.functional_drift_terms)
nx = xpts.size
ny = ypts.size
nz = zpts.size
a = self._get_kriging_matrix(n, n_withdrifts)
if style in ['grid', 'masked']:
if style == 'masked':
if mask is None:
raise IOError("Must specify boolean masking array "
"when style is 'masked'.")
if mask.ndim != 3:
raise ValueError("Mask is not three-dimensional.")
if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx:
if mask.shape[0] == nx and mask.shape[2] == nz and mask.shape[1] == ny:
mask = mask.swapaxes(0, 2)
else:
raise ValueError("Mask dimensions do not match "
"specified grid dimensions.")
mask = mask.flatten()
npt = nz * ny * nx
grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing='ij')
xpts = grid_x.flatten()
ypts = grid_y.flatten()
zpts = grid_z.flatten()
elif style == 'points':
            if xpts.size != ypts.size or ypts.size != zpts.size:
                raise ValueError("xpoints, ypoints, and zpoints must have "
                                 "the same dimensions when treated as "
                                 "listing discrete points.")
npt = nx
else:
raise ValueError("style argument must be 'grid', 'points', "
"or 'masked'")
if specified_drift_arrays is None:
specified_drift_arrays = []
spec_drift_grids = []
if self.specified_drift:
if len(specified_drift_arrays) == 0:
raise ValueError("Must provide drift values for kriging "
"points when using 'specified' drift "
"capability.")
if type(specified_drift_arrays) is not list:
raise TypeError("Arrays for specified drift terms must "
"be encapsulated in a list.")
for spec in specified_drift_arrays:
if style in ['grid', 'masked']:
if spec.ndim < 3:
raise ValueError("Dimensions of drift values array do "
"not match specified grid dimensions.")
elif spec.shape[0] != nz or spec.shape[1] != ny or spec.shape[2] != nx:
if spec.shape[0] == nx and spec.shape[2] == nz and spec.shape[1] == ny:
spec_drift_grids.append(np.squeeze(spec.swapaxes(0, 2)))
else:
raise ValueError("Dimensions of drift values array "
"do not match specified grid "
"dimensions.")
else:
spec_drift_grids.append(np.squeeze(spec))
elif style == 'points':
if spec.ndim != 1:
raise ValueError("Dimensions of drift values array do "
"not match specified grid dimensions.")
elif spec.shape[0] != xpts.size:
raise ValueError("Number of supplied drift values in "
"array do not match specified number "
"of kriging points.")
else:
spec_drift_grids.append(np.squeeze(spec))
if len(spec_drift_grids) != len(self.specified_drift_data_arrays):
raise ValueError("Inconsistent number of specified "
"drift terms supplied.")
else:
if len(specified_drift_arrays) != 0:
warnings.warn("Provided specified drift values, but "
"'specified' drift was not initialized during "
"instantiation of UniversalKriging3D class.",
RuntimeWarning)
xpts, ypts, zpts = _adjust_for_anisotropy(np.vstack((xpts, ypts, zpts)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y,
self.anisotropy_angle_z]).T
if style != 'masked':
mask = np.zeros(npt, dtype='bool')
xyz_points = np.concatenate((zpts[:, np.newaxis], ypts[:, np.newaxis],
xpts[:, np.newaxis]), axis=1)
xyz_data = np.concatenate((self.Z_ADJUSTED[:, np.newaxis],
self.Y_ADJUSTED[:, np.newaxis],
self.X_ADJUSTED[:, np.newaxis]), axis=1)
bd = cdist(xyz_points, xyz_data, 'euclidean')
if backend == 'vectorized':
kvalues, sigmasq = self._exec_vector(a, bd, xyz_points, mask,
n_withdrifts, spec_drift_grids)
elif backend == 'loop':
kvalues, sigmasq = self._exec_loop(a, bd, xyz_points, mask,
n_withdrifts, spec_drift_grids)
else:
            raise ValueError('Specified backend {} is not supported for '
                             '3D universal kriging.'.format(backend))
if style == 'masked':
kvalues = np.ma.array(kvalues, mask=mask)
sigmasq = np.ma.array(sigmasq, mask=mask)
if style in ['masked', 'grid']:
kvalues = kvalues.reshape((nz, ny, nx))
sigmasq = sigmasq.reshape((nz, ny, nx))
return kvalues, sigmasq
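if __name__ == '__main__':
    # Minimal usage sketch (added for illustration, not part of the original
    # module). It assumes the UniversalKriging3D constructor defined earlier in
    # this file follows the public PyKrige signature
    # UniversalKriging3D(x, y, z, values, variogram_model=..., drift_terms=...);
    # adjust if the actual signature differs.
    np.random.seed(0)
    x = np.random.random(50)
    y = np.random.random(50)
    z = np.random.random(50)
    values = x + 2.0 * y - z + 0.05 * np.random.randn(50)
    uk3d = UniversalKriging3D(x, y, z, values, variogram_model='linear',
                              drift_terms=['regional_linear'])
    gridx = gridy = gridz = np.linspace(0.0, 1.0, 10)
    kvalues, sigmasq = uk3d.execute('grid', gridx, gridy, gridz,
                                    backend='vectorized')
    print(kvalues.shape, sigmasq.shape)  # expected: (10, 10, 10) for both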
|
bsd-3-clause
|
siutanwong/scikit-learn
|
sklearn/linear_model/tests/test_perceptron.py
|
378
|
1815
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
|
bsd-3-clause
|
Castronova/ODM2PythonAPI
|
src/api/ODM2/services/readService.py
|
1
|
21883
|
__author__ = 'jmeline'
from sqlalchemy import func
import pandas as pd
from ...ODM2.models import *
from .. import serviceBase
from ODM2PythonAPI.src.api.ODM2 import models
class ReadODM2( serviceBase ):
'''
def __init__(self, session):
self._session = session
'''
# ################################################################################
# Annotations
# ################################################################################
# ################################################################################
# CV
# ################################################################################
# ################################################################################
# Core
# ################################################################################
"""
Variable
"""
def getVariables(self):
"""Select all on Variables
:return Variable Objects:
:type list:
"""
return self._session.query(Variables).all()
def getVariableById(self, variableId):
"""Select by variableId
:param variableId:
:type Integer:
:return Return matching Variable object filtered by variableId:
:type Variable:
"""
try:
return self._session.query(Variables).filter_by(VariableID=variableId).first()
except:
return None
def getVariableByCode(self, variableCode):
"""Select by variableCode
:param variableCode:
:type String:
:return Return matching Variable Object filtered by variableCode:
:type Variable:
"""
try:
return self._session.query(Variables).filter_by(VariableCode=variableCode).first()
except:
return None
"""
Method
"""
def getMethods(self):
"""Select all on Methods
:return Method Objects:
:type list:
"""
return self._session.query(Methods).all()
def getMethodById(self, methodId):
"""Select by methodId
:param methodId:
:type Integer
:return Return matching Method Object filtered by methodId:
:type Method:
"""
try:
return self._session.query(Methods).filter_by(MethodID=methodId).first()
except:
return None
def getMethodByCode(self, methodCode):
"""Select by methodCode
:param methodCode:
:type String:
:return Return matching Method Object filtered by method Code:
:type Method:
"""
try:
return self._session.query(Methods).filter_by(MethodCode=methodCode).first()
except:
return None
"""
ProcessingLevel
"""
def getProcessingLevels(self):
"""Select all on Processing Level
:return ProcessingLevel Objects:
:type list:
"""
return self._session.query(ProcessingLevels).all()
def getProcessingLevelById(self, processingId):
"""Select by processingId
:param processingId:
:type Integer:
:return Return matching ProcessingLevel Object filtered by processingId:
:type Processinglevel:
"""
try:
return self._session.query(ProcessingLevels).filter_by(ProcessingLevelID=processingId).first()
except:
return None
def getProcessingLevelByCode(self, processingCode):
"""Select by processingCode
:param processingCode:
:type String(50):
:return Return matching Processinglevel Object filtered by processingCode:
:type Processinglevel:
"""
try:
return self._session.query(ProcessingLevels).filter_by(ProcessingLevelCode=str(processingCode)).first()
        except Exception as e:
            print(e)
return None
"""
Sampling Feature
"""
def getSamplingFeatures(self):
"""Select all on SamplingFeatures
:return SamplingFeature Objects:
:type list:
"""
return self._session.query(SamplingFeatures).all()
def getSamplingFeatureById(self, samplingId):
"""Select by samplingId
:param samplingId:
:type Integer:
:return Return matching SamplingFeature Object filtered by samplingId:
:type SamplingFeature:
"""
try:
return self._session.query(SamplingFeatures).filter_by(SamplingFeatureID=samplingId).first()
except:
return None
def getSamplingFeatureByCode(self, samplingFeatureCode):
"""Select by samplingFeatureCode
:param samplingFeatureCode:
:type String:
:return Return matching SamplingFeature Object filtered by samplingId
:type list:
"""
try:
return self._session.query(SamplingFeatures).filter_by(SamplingFeatureCode=samplingFeatureCode).first()
except Exception as e:
return None
def getSamplingFeaturesByType(self, samplingFeatureTypeCV):
"""Select by samplingFeatureTypeCV
:param samplingFeatureTypeCV:
:type String:
:return Return matching SamplingFeature Objects filtered by samplingFeatureTypeCV:
:type list:
"""
try:
return self._session.query(SamplingFeatures).filter_by(SamplingFeatureTypeCV=samplingFeatureTypeCV).all()
except Exception as e:
            print(e)
return None
def getSamplingFeatureByGeometry(self, wkt_geometry):
try:
# ST_Equals(geometry, geometry)
return self._session.query(SamplingFeatures).filter(
func.ST_AsText(SamplingFeatures.FeatureGeometry) == func.ST_AsText(wkt_geometry)).first()
        except Exception as e:
            print(e)
return None
def getGeometryTest(self, TestGeom):
Geom = self._session.query(SamplingFeatures).first()
        print("Queried Geometry: ", self._session.query(Geom.FeatureGeometry.ST_AsText()).first())
GeomText = self._session.query(
func.ST_Union(Geom.FeatureGeometry, func.ST_GeomFromText(TestGeom)).ST_AsText()).first()
        print(GeomText)
"""
Unit
"""
def getUnits(self):
"""Select all on Unit
:return Unit Objects:
:type list:
"""
return self._session.query(Units).all()
def getUnitById(self, unitId):
"""Select by samplingId
:param unitId:
:type Integer:
:return Return matching Unit Object filtered by UnitId:
:type Unit:
"""
try:
return self._session.query(Units).filter_by(UnitsID=unitId).first()
except:
return None
def getUnitByName(self, unitName):
try:
return self._session.query(Units).filter(Units.UnitsName.ilike(unitName)).first()
except:
return None
"""
Organization
"""
def getOrganizations(self):
"""Select all on Organization
:return Organization Objects:
:type list:
"""
return self._session.query(Organizations).all()
def getOrganizationById(self, orgId):
"""Select by orgId
:param orgId:
:type Integer:
:return Return matching Unit Object filtered by orgId:
:type Organization:
"""
try:
return self._session.query(Organizations).filter_by(OrganizationID=orgId).first()
except:
return None
def getOrganizationByCode(self, orgCode):
"""Select by orgCode
:param orgCode:
:type String:
:return Return matching Organization Object filtered by orgCode
:type Organization:
"""
try:
return self._session.query(Organizations).filter_by(OrganizationCode=orgCode).first()
except:
return None
"""
Person
"""
def getPeople(self):
"""Select all on Person
:return Person Objects:
:type list:
"""
return self._session.query(People).all()
def getPersonById(self, personId):
"""Select by personId
:param personId:
:type Integer:
:return Return matching Person Object filtered by personId:
:type Person:
"""
try:
return self._session.query(People).filter_by(PersonID=personId).first()
except:
return None
def getPersonByName(self, personfirst, personlast):
"""Select by person name, last name combination
:param personfirst: first name of person
:param personlast: last name of person
:return Return matching Person Object:
:type Person:
"""
try:
return self._session.query(People).filter(People.PersonFirstName.ilike(personfirst)). \
filter(People.PersonLastName.ilike(personlast)).first()
except:
return None
def getAffiliationByPersonAndOrg(self, personfirst, personlast, orgcode):
"""
Select all affiliation of person
:param personfirst: first name of person
:param personlast: last name of person
:param orgcode: organization code (e.g. uwrl)
:return: ODM2.Affiliation
"""
try:
return self._session.query(Affiliations).filter(Organizations.OrganizationCode.ilike(orgcode)) \
.filter(People.PersonFirstName.ilike(personfirst)) \
.filter(People.PersonLastName.ilike(personlast)).first()
except:
return None
def getAffiliationsByPerson(self, personfirst, personlast):
"""
Select all affiliation of person
:param personfirst: first name of person
:param personlast: last name of person
:return: [ODM2.Affiliation]
"""
try:
return self._session.query(Affiliations).filter(People.PersonFirstName.ilike(personfirst)) \
.filter(People.PersonLastName.ilike(personlast)).all()
except:
return None
"""
Results
"""
def getResults(self):
try:
return self._session.query(Results).all()
except:
return None
def getResultByActionID(self, actionID):
try:
return self._session.query(Results).join(FeatureActions).join(Actions).filter_by(ActionID=actionID).all()
except:
return None
def getResultByID(self, resultID):
try:
return self._session.query(Results).filter_by(ResultID=resultID).one()
except:
return None
def getResultAndGeomByID(self, resultID):
try:
return self._session.query(Results, SamplingFeatures.FeatureGeometry.ST_AsText()). \
join(FeatureActions). \
join(SamplingFeatures). \
join(Results). \
filter_by(ResultID=resultID).one()
except:
return None
def getResultAndGeomByActionID(self, actionID):
try:
return self._session.query(Results, SamplingFeatures.FeatureGeometry.ST_AsText()). \
join(FeatureActions). \
join(SamplingFeatures). \
join(Actions). \
filter_by(ActionID=actionID).all()
except:
return None
def getResultValidDateTime(self, resultId):
q = self._session.query(Results.ValidDateTime).filter(Results.ResultID==int(resultId))
return q.first()
"""
Datasets
"""
def getDataSets(self):
try:
return self._session.query(DataSets).all()
except:
return None
def getDatasetByCode(self, dscode):
try:
            return self._session.query(DataSets).filter(DataSets.DatasetCode.ilike(dscode)).first()
except:
return None
# ################################################################################
# Data Quality
# ################################################################################
def getAllDataQuality(self):
"""Select all on Data Quality
:return Dataquality Objects:
:type list:
"""
return self._session.query(DataQuality).all()
# ################################################################################
# Equipment
# ################################################################################
def getAllEquipment(self):
return self._session.query(Equipment).all()
# ################################################################################
# Extension Properties
# ################################################################################
# ################################################################################
# External Identifiers
# ################################################################################
# ################################################################################
# Lab Analyses
# ################################################################################
# ################################################################################
# Provenance
# ################################################################################
"""
Citation
"""
def getCitations(self):
        return self._session.query(Citations).all()
# ################################################################################
# Results
# ################################################################################
"""
TimeSeriesResults
"""
def getTimeSeriesResults(self):
"""Select all on TimeSeriesResults
:return TimeSeriesResults Objects:
:type list:
"""
return self._session.query(TimeSeriesResults).all()
def getTimeSeriesResultByResultId(self, resultId):
"""Select by resultID on ResultID
:param resultId:
:type Integer:
:return return matching Timeseriesresult Object filtered by resultId
"""
try:
return self._session.query(TimeSeriesResults).filter_by(ResultID=resultId).one()
except:
return None
def getTimeSeriesResultbyCode(self, timeSeriesCode):
"""Select by time
"""
pass
"""
TimeSeriesResultValues
"""
def getTimeSeriesResultValues(self):
"""Select all on TimeSeriesResults
:return TimeSeriesResultsValue Objects:
:type list:
"""
q = self._session.query(TimeSeriesResults).all()
df = pd.DataFrame([dv.list_repr() for dv in q])
df.columns = q[0].get_columns()
return df
# return self._session.query(Timeseriesresultvalue).all()
def getDateTime_From_Time_Series_Result_Values(self, id):
"""Select by resultId
:param timeSeriesId:
:type Integer:
:return return matching Timeseriesresultvalue Object filtered by resultId:
:type Timeseriesresultvalue:
"""
# try:
# q = self._session.query(TimeSeriesResults).filter_by(ResultID=resultId).all()
#
# df = pd.DataFrame([dv.list_repr() for dv in q])
# df.columns = q[0].get_columns()
# return df
# # return self._session.query(Timeseriesresultvalue).filter_by(ResultID=resultId).all()
# except Exception as e:
# return None
try:
# return self._session.query(models.TimeSeriesResultValues).filter_by(ResultID=resultId).all()
return self._session.execute("SELECT datetime(ValueDateTime) From timeseriesresultvalues WHERE ResultID=%d;" % id).fetchall()
except:
return None
def getTimeSeriesResultValuesByResultID(self, id):
try:
return self._session.query(models.TimeSeriesResultValues).filter_by(ResultID=id).all()
except:
return None
def getTimeSeriesResultValuesByCode(self, timeSeriesCode):
"""
:param timeSeriesCode:
:return:
"""
pass
def getTimeSeriesResultValuesByTime(self, resultid, starttime, endtime=None):
# set end = start if it is None
endtime = starttime if not endtime else endtime
try:
return self._session.query(TimeSeriesResultValues).filter_by(ResultID=resultid) \
.filter(TimeSeriesResultValues.ValueDateTime >= starttime) \
.filter(TimeSeriesResultValues.ValueDateTime <= endtime) \
.order_by(TimeSeriesResultValues.ValueDateTime).all()
except:
return None
# ################################################################################
# Annotations
# ################################################################################
"""
Site
"""
def getAllSites(self):
"""Select all on Sites
:return Site Objects:
:type list:
"""
return self._session.query(Sites).all()
def getSiteBySFId(self, siteId):
"""Select by siteId
:param siteId:
:type Integer:
:return Return matching Site Object filtered by siteId:
:type Site:
"""
try:
return self._session.query(Sites).filter_by(SamplingFeatureID=siteId).one()
except:
return None
def getSiteBySFCode(self, siteCode):
"""Select by siteCode
:param siteCode:
:type String:
:return Return matching Samplingfeature Object filtered by siteCode:
:type Samplingfeature:
"""
sf = self._session.query(SamplingFeatures).filter_by(SamplingFeatureCode=siteCode).one()
return self._session.query(Sites).filter_by(SamplingFeatureID=sf.SamplingFeatureID).one()
def getSpatialReferenceByCode(self, srsCode):
try:
return self._session.query(SpatialReferences).filter(SpatialReferences.SRSCode.ilike(srsCode)).first()
except:
return None
# ################################################################################
# Sensors
# ################################################################################
def getAllDeploymentAction(self):
"""Select all on DeploymentAction
:return DeploymentAction Objects:
:type list:
"""
return self._session.query(DeploymentActions).all()
# return self._session.query)
def getDeploymentActionById(self, deploymentId):
"""Select by deploymentId
:param deploymentId:
:type Integer:
:return Return Matching DeploymentAction Object filtered by deploymentId:
:type DeploymentAction:
"""
try:
return self._session.query(DeploymentActions).filter_by(DeploymentActionID=deploymentId).one()
except:
return None
def getDeploymentActionByCode(self, deploymentCode):
"""Select by deploymentCode
:param deploymentCode:
:type String:
:return Return matching DeploymentAction Object filtered by deploymentCode:
:type DeploymentAction:
"""
try:
return self._session.query(DeploymentActions).filter_by(DeploymentActionCode=deploymentCode).one()
except:
return None
# ################################################################################
# Simulation
# ################################################################################
def getAllModels(self):
try:
return self._session.query(Models).all()
except:
return None
def getModelByCode(self, modelcode):
try:
return self._session.query(Models).filter(Models.ModelCode.ilike(modelcode)).first()
except:
return None
def getAllSimulations(self):
try:
return self._session.query(Simulations).all()
except:
return None
def getSimulationByName(self, simulationName):
try:
return self._session.query(Simulations).filter(Simulations.SimulationName.ilike(simulationName)).first()
except:
return None
def getSimulationByActionID(self, actionID):
try:
return self._session.query(Simulations).filter_by(ActionID=actionID).first()
except:
return None
def getRelatedModelsByID(self, modelid):
try:
return self._session.query(RelatedModels).filter_by(RelatedModelID=modelid).all()
except:
return None
def getRelatedModelsByCode(self, modelcode):
try:
return self._session.query(RelatedModels).join(Models, RelatedModels.ModelID == Models.ModelID) \
.filter(Models.ModelCode == modelcode).all()
except:
return None
def getResultsBySimulationID(self, simulationID):
# is this correct? because the result table has no simulationId column
try:
return self._session.query(Results).filter(Simulations.SimulationID == simulationID).all()
except:
return None
def getResultByResultID(self, id):
try:
return self._session.query(models.Results).filter_by(ResultID=id).all()
except:
return None
# ################################################################################
# ODM2
# ################################################################################
#
# class readODM2(object):
# def test(self):
# return None
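# Minimal usage sketch (added for illustration). The exact session/serviceBase
# wiring lives elsewhere in this package, so the setup below is only
# indicative; the method calls are the ones defined above.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#
#     engine = create_engine('sqlite:///ODM2.sqlite')   # hypothetical database
#     session = sessionmaker(bind=engine)()
#     read = ReadODM2(session)
#     print([v.VariableCode for v in read.getVariables()])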
|
bsd-3-clause
|
wxiang7/airflow
|
airflow/contrib/plugins/metastore_browser/main.py
|
6
|
5122
|
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks import HiveMetastoreHook, MySqlHook, PrestoHook, HiveCliHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@wwwutils.gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@wwwutils.gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@wwwutils.gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
|
apache-2.0
|
aliciawyy/dmining
|
models/struct.py
|
1
|
1697
|
from sklearn import model_selection
import xgboost as xgb
import dm_common
def get_bootstrap_sample(x, y, random_state=0):
# x should be of type pandas DataFrame
x_bootstrap = x.sample(len(x), random_state=random_state, replace=True)
return x_bootstrap, y[x_bootstrap.index]
class Problem(dm_common.StringMixin):
def __init__(self, data, target):
if data.shape[0] != len(target):
            raise ValueError("The length of data '{}' and the length"
" of target '{}' are not "
"equal.".format(data.shape[0], len(target)))
self.data = data
self.target = target
@classmethod
def from_data_frame(cls, df, target_col=None):
if target_col is None:
# take the last column as target by default
target_col = df.columns[-1]
return cls(df.drop(target_col, 1), df[target_col])
@property
def index(self):
return self.data.index
def train_test_split(self, test_size=0.2, random_state=0):
x_train, x_test, y_train, y_test = model_selection.train_test_split(
self.data, self.target, test_size=test_size,
random_state=random_state
)
problem_train = Problem(x_train, y_train)
problem_test = Problem(x_test, y_test)
return problem_train, problem_test
def get_bootstrap_sample(self, random_state=0):
return Problem(*get_bootstrap_sample(self.data, self.target,
random_state))
def to_xgb_matrix(self):
return xgb.DMatrix(self.data, label=self.target)
def __len__(self):
return len(self.target)
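if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module): build a small DataFrame, wrap it in a Problem, then split and
    # bootstrap it. Column names and sizes here are arbitrary.
    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"a": np.arange(10.0),
                       "b": np.arange(10.0) ** 2,
                       "label": np.arange(10) % 2})
    problem = Problem.from_data_frame(df)      # last column used as target
    train, test = problem.train_test_split(test_size=0.3)
    boot = problem.get_bootstrap_sample(random_state=1)
    print(len(train), len(test), len(boot))    # e.g. 7 3 10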
|
apache-2.0
|
AndresYague/Snuppat
|
output/TDUTables/pocketCorrelations.py
|
1
|
3417
|
import sys
import scipy.stats as sts
import numpy as np
import matplotlib.pyplot as plt
def plotPocketSizes(fname, plotIndep = "lambda"):
'''Plot pocket sizes against core mass or lambda'''
# Get omega from title
omg = float(fname.split("Ov")[1].split(".")[0])*1e-2
# Ignore first x models
ignoreFirst = 0
# Now do the rest
pSizes = []; lambs = []; coreMass = []
intMass = []; integC13 = []; pIntegs = []
with open(fname, "r") as fread:
while True:
line = fread.readline()
if len(line) == 0:
break
if "Lambda" in line:
if ignoreFirst > 0:
ignoreFirst -= 1
continue
lnlst = line.split(";")
lamb = float(lnlst[0].split()[-1])
pockSize = float(lnlst[1].split()[-1])
cMass = float(lnlst[2].split()[-1])
iMass = float(lnlst[3].split()[-1])
if pockSize > 0:
lambs.append(lamb)
coreMass.append(cMass)
intMass.append(iMass)
pSizes.append(pockSize)
intg = 0
m1 = None; c131 = None
while True:
                    lnlst = list(map(float, fread.readline().split()))
if len(lnlst) == 0:
break
m2 = lnlst[0]
c132 = lnlst[1]
if m1 is None:
m1 = m2; c131 = c132
continue
else:
intg += (m2 - m1)*(c131 + c132)*0.5
m1 = m2; c131 = c132
pIntegs.append(intg)
# Define dependent
dep = pSizes
#dep = pIntegs
# Define independent
if plotIndep == "lambda":
        indep = list(lambs)
elif plotIndep == "core mass":
        indep = list(coreMass)
elif plotIndep == "intershell mass":
        indep = list(intMass)
mm, nn, r_val, p_val, std_err = sts.linregress(indep, dep)
    linPSiz = [mm*x + nn for x in indep]
lab = "r$^2 = {:.2f}$; ".format(r_val**2)
lab += "$\omega = {:.2f}$".format(omg)
lin = plt.plot(indep, dep, "o")
    plt.plot(indep, linPSiz, "-", color=lin[-1].get_color(),
             label=lab)
def main():
if len(sys.argv) < 2:
print("Usage: python {} profile1 <profile2 ...>".format(sys.argv[0]))
return 1
subplotNum = len(sys.argv[1:])
fig = plt.figure()
# Choose what to plot against
plotIndep = "lambda"
# Plot all the profiles
ii = 0; maxSaveMass = 0
for arch in sys.argv[1:]:
plotPocketSizes(arch, plotIndep = plotIndep)
if plotIndep == "lambda":
plt.xlabel("$\lambda$")
elif plotIndep == "core mass":
plt.xlabel("Core mass")
elif plotIndep == "intershell mass":
plt.xlabel("Intershell mass")
plt.ylabel("Pocket mass")
plt.legend(loc = 0)
plt.show()
if __name__ == "__main__":
main()
|
mit
|
jorge2703/scikit-learn
|
examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py
|
252
|
3490
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
correspond to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
|
bsd-3-clause
|
tempbottle/opendht
|
python/tools/scanner.py
|
1
|
7200
|
#!/usr/bin/env python3
# Copyright (c) 2015 Savoir-Faire Linux Inc.
# Author: Adrien Béraud <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERWISE
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time, sys
from pprint import pprint
from math import cos, sin, pi
sys.path.append('..')
from opendht import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.colors import colorConverter
from matplotlib.collections import RegularPolyCollection
from matplotlib.widgets import Button
from mpl_toolkits.basemap import Basemap
import GeoIP
done = 0
all_nodes = NodeSet()
plt.ion()
plt.figaspect(2.)
fig, axes = plt.subplots(2, 1)
fig.set_size_inches(12,16,forward=True)
fig.tight_layout()
fig.canvas.set_window_title('OpenDHT scanner')
mpx = axes[0]
mpx.set_title("Node GeoIP")
m = Basemap(projection='robin', resolution = 'l', area_thresh = 1000.0, lat_0=0, lon_0=0, ax=mpx)
m.fillcontinents(color='#cccccc',lake_color='white')
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,420.,60.))
m.drawmapboundary(fill_color='white')
plt.show()
ringx = axes[1]
ringx.set_title("Node IDs")
ringx.set_autoscale_on(False)
ringx.set_aspect('equal', 'datalim')
ringx.set_xlim(-2.,2.)
ringx.set_ylim(-1.5,1.5)
exitax = plt.axes([0.92, 0.95, 0.07, 0.04])
exitbtn = Button(exitax, 'Exit')
reloadax = plt.axes([0.92, 0.90, 0.07, 0.04])
button = Button(reloadax, 'Reload')
gi = GeoIP.open("GeoLiteCity.dat", GeoIP.GEOIP_INDEX_CACHE | GeoIP.GEOIP_CHECK_CACHE)
gi6 = GeoIP.open("GeoLiteCityv6.dat", GeoIP.GEOIP_INDEX_CACHE | GeoIP.GEOIP_CHECK_CACHE)
def gcb(v):
return True
r = DhtRunner()
i = Identity()
i.generate(bits = 1024)
r.run(i, port=4112)
r.bootstrap("bootstrap.ring.cx", "4222")
all_lines = []
plt.pause(2)
def step(cur_h, cur_depth):
global done, all_nodes, all_lines
done += 1
a = 2.*pi*cur_h.toFloat()
b = a + 2.*pi/(2**(cur_depth))
print("step", cur_h, cur_depth)
arc = ringx.add_patch(mpatches.Wedge([0.,0,], 1., a*180/pi, b*180/pi, fill=True, color="blue", alpha=0.5))
lines = ringx.plot([0, cos(a)], [0, sin(a)], 'k-', lw=1.2)
all_lines.extend(lines)
r.get(cur_h, gcb, lambda d, nodes: nextstep(cur_h, cur_depth, d, nodes, arc=arc, lines=lines))
def nextstep(cur_h, cur_depth, ok, nodes, arc=None, lines=[]):
global done, all_nodes
if arc:
arc.remove()
del arc
for l in lines:
l.set_color('#444444')
snodes = NodeSet()
snodes.extend(nodes)
all_nodes.extend(nodes)
depth = min(6, InfoHash.commonBits(snodes.first(), snodes.last())+4)
if cur_depth < depth:
for b in range(cur_depth, depth):
new_h = InfoHash(cur_h.toString());
new_h.setBit(b, 1);
step(new_h, b+1);
done -= 1
run = True
def exitcb(arg):
global run
run = False
exitbtn.on_clicked(exitcb)
def restart(arg):
global collection, all_lines, points
for l in all_lines:
l.remove()
del l
all_lines = []
if collection:
collection.remove()
del collection
collection = None
for p in points:
p.remove()
del p
points = []
print(arg)
start_h = InfoHash()
start_h.setBit(159, 1)
step(start_h, 0)
plt.draw()
collection = None
points = []
not_found = []
def generate_set():
node_ipv4 = {}
node_ipv6 = {}
for n in all_nodes:
addr = b''.join(n.getNode().getAddr().split(b':')[0:-1]).decode()
if addr[0] == '[':
addr = addr[1:-1]
if addr in node_ipv6:
node_ipv6[addr][1] = 1
else:
node_ipv6[addr] = [n, 1]
else:
if addr in node_ipv4:
node_ipv4[addr][1] += 1
else:
node_ipv4[addr] = [n, 1]
return node_ipv4, node_ipv6
def update_plot():
global done, m, collection, not_found, points
for p in points:
p.remove()
del p
points = []
lats = []
lons = []
cities=[]
colors=[]
not_found.clear()
ip4s, ip6s = generate_set()
ares = []
for addr, n in ip4s.items():
ares.append((addr, n[0].getNode(), gi.record_by_name(addr)))
for addr, n in ip6s.items():
ares.append((addr, n[0].getNode(), gi6.record_by_name_v6(addr)))
for r in ares:
res = r[2]
n = r[1]
if res:
lats.append(res['latitude'])
lons.append(res['longitude'])
cities.append(res['city'] if res['city'] else (str(int(res['latitude']))+'-'+str(int(res['longitude']))))
colors.append('red' if n.isExpired() else 'blue')
else:
not_found.append(r[0])
x,y = m(lons,lats)
points.extend(m.plot(x,y,'bo'))
for name, xpt, ypt in zip(cities, x, y):
points.append(mpx.text(xpt+50000, ypt+50000, name))
node_val = [n.getId().toFloat() for n in all_nodes]
xys = [(cos(d*2*pi), sin(d*2*pi)) for d in node_val]
if collection:
collection.remove()
del collection
collection = None
collection = ringx.add_collection(RegularPolyCollection(
fig.dpi, 6, sizes=(10,), facecolors=colors,
offsets = xys, transOffset = ringx.transData))
if run:
# start first step
start_h = InfoHash()
start_h.setBit(159, 1)
step(start_h, 0)
def d(arg):
pass
while run:
while run and done > 0:
update_plot()
plt.draw()
plt.pause(.5)
if not run:
break
button.on_clicked(restart)
node_ip4s, node_ip6s = generate_set()
print(all_nodes.size(), " nodes found")
print(all_nodes)
print(len(not_found), " nodes not geolocalized")
for n in not_found:
print(n)
print('')
print(len(node_ip4s), " different IPv4s :")
for ip in node_ip4s.items():
print(ip[0] + " : " + str(ip[1][1]) + " nodes")
print('')
print(len(node_ip6s), " different IPv6s :")
for ip in node_ip6s.items():
print(ip[0] + " : " + str(ip[1][1]) + " nodes")
while run and done == 0:
plt.pause(.5)
button.on_clicked(d)
plt.draw()
all_nodes = []
r.join()
|
gpl-3.0
|
tqdv/info-lstl
|
Info/TPs/2017-12-11_voyageur_commerce.py
|
1
|
22738
|
# -*- coding: UTF-8 -*-
# Lab: travelling salesman problem (in the plane)
# Naif_1 : random path
# Naif_2 : all paths (brute force)
# Glouton_1 : closest point to the last one
# Glouton_1_1 : Glouton_1 step by step
# Glouton_1_2 : Glouton_1 starting from every point
# Glouton_2 : insert into the segment that minimizes the weight
# (points taken in order)
# Glouton_2_1 : Glouton_2 step by step
# Glouton_2_2 : Glouton_2 for every point
# Glouton_2_2_1 : Glouton_2_2 step by step (0.5 s pause)
# Glouton_2_3 : Glouton_2_2 starting from every point
#
# test1 : random with 10 points
# test2 : random with 10 points, also displays the graph
# test3 : Naif_1 with 10 points and 100,000 tries
# test4 : debugging of Naif_2
# test5 : compares Naif_1 and Naif_2
# test6 : Glouton_1 with 30 points
# test7 : Glouton_1_1 with 80 points
# test8 : Glouton_1_2 with 40 points
# test9 : Glouton_2 with 50 points
# test10 : Glouton_2_1 with 50 points
# test11 : Glouton_2_2 with 30 points
# test12 : Glouton_2_2_1 with 30 points
# test13 : Glouton_2_3 with 30 points
# I spent so much time on these functions that I think it is worth
# highlighting them:
# - PermutationSuivante_1
# - CombiSuivant_1
from random import random, shuffle, randint
from numpy import log as ln, ceil
from math import factorial, sqrt
from matplotlib import use
use("TkAgg")
from matplotlib.pyplot import (
plot, scatter, show, close, interactive, isinteractive, clf, draw, pause
)
interactive(True) # Automatic if backend is interactive
def Nuage(N):
X = [random() for _ in range(N)]
Y = [random() for _ in range(N)]
return X, Y
# Moves the barycentre to 0, then divides by the maximum of the norms
# Preserves distance ratios
# C : list of vertices representing a path
def DessinerChemin(X, Y, C):
XC = [X[c] for c in C]
YC = [Y[c] for c in C]
    # Draw the path
    plot(XC, YC, color=[0.5, 0, 0.75])
    # Draw the point cloud
scatter(X, Y, color=[0.7, 0.2, 0.9])
show()
def CH_aleatoire(N):
L = list(range(N))
shuffle(L)
L.append(L[0])
return L
def test1():
(X, Y) = Nuage(10)
C = CH_aleatoire(10)
G = mat_adjacence(X, Y)
return Poids(G, C)
def distance(x1, y1, x2, y2):
return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
def mat_adjacence(X, Y):
n = len(X)
return [[distance(X[i], Y[i], X[j], Y[j]) for j in range(n)]
for i in range(n)]
def Poids(G, C):
p = 0
for i in range(len(C) - 1):
p += G[C[i]][C[i + 1]]
return p
def test2():
(X, Y) = Nuage(10)
C = CH_aleatoire(10)
DessinerChemin(X, Y, C)
G = mat_adjacence(X, Y)
return Poids(G, C)
def Naif_1(X, Y, NB_ESSAIS=100):
if not NB_ESSAIS > 0:
return
n = len(X)
G = mat_adjacence(X, Y)
C = minC = CH_aleatoire(n)
p = minp = Poids(G, C)
for i in range(NB_ESSAIS - 1):
C = CH_aleatoire(n)
p = Poids(G, C)
if p < minp:
minp = p
minC = C
DessinerChemin(X, Y, minC)
return minp
def test3():
X, Y = Nuage(10)
print(Naif_1(X, Y, 100000))
# Aside on the probability that the Naif_1 algorithm finds the optimal solution
def proba(N):
    return 2 / factorial(N - 1)
# Number of tries needed to find the optimum with probability alpha for N vertices
def Trouver_K(N, alpha):
    return int(ceil(ln(1 - alpha) / ln(1 - proba(N))))
# End of the aside
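# Worked example (added for illustration): with N = 10 points, a random tour is
# optimal with probability proba(10) = 2/9! ~ 5.5e-6, so Trouver_K(10, 0.95)
# returns about 5.4e5 tries for a 95 % chance of having hit the optimum.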
def UpletSuivant(L):
n = len(L)
i = len(L) - 1
while i >= 0:
if L[i] < n - 1:
L[i] += 1
return L
else:
L[i] = 0
i -= 1
return None
def EstPermutation(L):
vu = [False for _ in range(len(L))]
for i in L:
vu[i] = True
for i in vu:
if not i:
return False
return True
def PermutationSuivante(L):
N = UpletSuivant(L)
if N is None:
return None
while not EstPermutation(N):
N = UpletSuivant(N)
if N is None:
return None
return N
def PermutationSuivante_1(L):
n = len(L)
if not n > 0:
return None
    # NB: copy-paste of CombiSuivant_1 with lt -> n and T -> L
    # PermutationSuivante_1 is a special case of CombiSuivant_1
    # with `len(T) == n`
    # NB: pos could be a list of booleans (?)
    # Checks that L is a combination and, if it is not,
    # modifies L so that it is. Also builds `pos`
    # pos : position of each element in the combination
pos = [None] * n
i = 0
correct = True
while i < n and correct:
if not L[i] < n:
return None
if L[i] < 0:
L[i] = 0
correct = False
elif pos[L[i]] is None:
pos[L[i]] = i
i += 1
else:
# 0 <= L[i] < n and pos[L[i]] != None
correct = False
if not correct:
        # `i_erreur` : index of the first error
i_erreur = i
val_erreur = L[i_erreur]
trouve = False
while not i < 0 and not trouve:
            # Look for a larger value
cur_val = L[i]
j = cur_val + 1
if cur_val < 0:
j = 0
while j < n and pos[j] is not None:
j += 1
if j == n:
                # Not found: free the slot and move on to the next one,
                # except if it is `i_erreur`, since its value has already been seen
                # T/S: if the code breaks, check here
if i != i_erreur:
pos[cur_val] = None
i -= 1
else:
nv_val = j
L[i] = j
pos[nv_val] = i
                # T/S: I do not know why
if not cur_val < 0 and i != i_erreur:
pos[cur_val] = None
trouve = True
if i < 0:
            # No way to get a correct combination (?)
return None
        # Fill in the remaining elements of L (in increasing order)
i += 1
        j = 0  # scans `pos`; this is the value of the element
while i < n:
while j < n and pos[j] is not None:
j += 1
if j == n:
raise Exception("Impossible (?), cf code")
# Pas assez d'éléments disponibles dans pos (plus de n élts)
# C'est impossible (?)
L[i] = j
# pos[j] = i
j += 1
i += 1
return L
    # NB: end of the copy-paste
    # Look for the longest decreasing suffix, starting from the end
    i = n - 2  # i is the index of the element just before the decreasing suffix
trouve = False
while not i < 0 and not trouve:
if not L[i] > L[i+1]:
trouve = True
else:
i -= 1
if i < 0:
return None
    # Put the elements back in increasing order
    # i _ k -> <- j _ n
j = n - 1
k = i + 1
while j > k:
L[k], L[j] = L[j], L[k]
j -= 1
k += 1
    # Put L[i] in its new place
j = i + 1
while L[j] < L[i]:
j += 1
if j == n:
raise Exception("Pas d'élément supérieur dans le reste !?")
L[i], L[j] = L[j], L[i]
return L
def Naif_2(X, Y):
P = [0 for _ in range(len(X))]
C = C_min = P + [P[0]]
if not EstPermutation(P):
P = PermutationSuivante(P)
C = C_min = P + [P[0]]
G = mat_adjacence(X, Y)
p = p_min = Poids(G, C)
P = PermutationSuivante(P)
while P is not None:
C = P + [P[0]]
p = Poids(G, C)
if p < p_min:
p_min = p
C_min = C
P = PermutationSuivante(P)
DessinerChemin(X, Y, C_min)
return p_min
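# NB: Naif_2 is an exhaustive search; it walks through every n-tuple to find the
# permutations, so its cost grows (at least) factorially with the number of
# points and it is only usable on very small inputs (test5 below uses 7 points,
# presumably for that reason).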
# I am skipping `EsperancepoidsMinimal(N)`
def PlusProcheParmi(G, x, V):
if not len(V) > 0:
return
n = len(V)
P = min_P = V[0]
d = min_d = G[x][P]
for i in range(1, n):
P = V[i]
d = G[x][P]
if d < min_d:
min_P = P
min_d = d
return min_P
def test4():
X = [0, 0, 1]
Y = [0, 1, 0]
print "poids min :", Naif_1(X, Y)
clf()
return Naif_2(X, Y)
def test5():
X, Y = Nuage(7)
p1 = Naif_1(X, Y)
print "1:", p1
p2 = Naif_2(X, Y)
print "2:", p2
# To avoid using list.remove()
def retirer_elem(L, x):
n = len(L)
i = 0
while i < n and L[i] != x:
i += 1
if i == n:
raise Exception("Élément absent de la liste")
L = L[:i] + L[i + 1:]
return L
# And what if I want to modify the list in place?
# This is probably terrible complexity-wise
def retirer_elem_en_place(L, x):
n = len(L)
i = 0
while i < n and L[i] != x:
i += 1
if i == n:
raise Exception("Élément absent de la liste")
while i + 1 < n:
L[i], L[i + 1] = L[i + 1], L[i]
i += 1
L.pop()
# Builds a circuit by repeatedly taking the point closest to the last one added
def Glouton_1(X, Y):
n = len(X)
p = randint(0, n - 1)
G = mat_adjacence(X, Y)
C = [p]
V = list(range(n))
V.remove(p)
while V:
p = PlusProcheParmi(G, p, V)
C.append(p)
V.remove(p)
C.append(C[0])
DessinerChemin(X, Y, C)
G = mat_adjacence(X, Y)
return Poids(G, C)
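# NB: Glouton_1 is the nearest-neighbour heuristic: O(n^2) with the adjacency
# matrix, but the tour it returns is generally not optimal and depends on the
# randomly chosen starting vertex (hence Glouton_1_2 below, which tries them all).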
# Modified version so that the drawing can be watched step by step
def DessinerChemin_1(X, Y, C):
XC = [X[c] for c in C]
YC = [Y[c] for c in C]
clf()
    # Draw the path
    plot(XC, YC, color=[0.5, 0, 0.75])
    # Draw the point cloud
scatter(X, Y, color=[0.7, 0.2, 0.9])
draw()
def Glouton_1_1(X, Y):
n = len(X)
p = randint(0, n - 1)
G = mat_adjacence(X, Y)
C = [p]
V = list(range(n))
V.remove(p)
DessinerChemin_1(X, Y, C)
show()
while V:
p = PlusProcheParmi(G, p, V)
C.append(p)
V.remove(p)
DessinerChemin_1(X, Y, C)
C.append(C[0])
DessinerChemin(X, Y, C)
G = mat_adjacence(X, Y)
return Poids(G, C)
def test6():
X, Y = Nuage(30)
return Glouton_1(X, Y)
def test7():
X, Y = Nuage(80)
return Glouton_1_1(X, Y)
# We do not always get the same circuit when starting from different
# vertices
# Same idea, but this time every starting point is tried
def Glouton_1_2(X, Y):
    # o = origin
    # p = weight
n = len(X)
if not n:
raise Exception("Pas de points")
G = mat_adjacence(X, Y)
o_min = 0
def aux(o):
P = 0
C = [o]
V = list(range(n))
V.remove(o)
p = o
while V:
p1 = PlusProcheParmi(G, p, V)
C.append(p1)
V.remove(p1)
P += G[p][p1]
p = p1
C.append(o)
P += G[p][o]
return C, P
C_min, P_min = aux(o_min)
for i in range(1, n):
C, P = aux(i)
if P < P_min:
C_min = C
o_min = i
P_min = P
DessinerChemin(X, Y, C_min)
return o_min, P_min
def test8():
X, Y = Nuage(40)
return Glouton_1_2(X, Y)
# If list.insert(i, x) is not allowed
def InsererDansListe(L, i, x):
return L[:i] + [x] + L[i:]
# And in place (using list.pop() and list.append())
def InsererDansListe_1(L, i, x):
n = len(L) - 1 - i
R = []
for j in range(n):
if L:
R.append(L.pop())
L.append(x)
while R:
L.append(R.pop())
# In place again
def InsererDansListe_2(L, i, x):
n = len(L)
t = x
if i < -n:
i = 0
elif -n <= i < 0:
i = n + i
for i in range(i, n):
L[i], t = t, L[i]
L.append(t)
# C is a circuit
def InsererAuMieux(G, x, C):
n = len(C)
if n == 0:
C.append(x)
return
if n == 1:
C.append(x)
        C.append(C[0])  # close the circuit back to its start (assumed intent; the original bare append() lacked an argument)
return
p_min = G[C[0]][x] + G[x][C[1]]
    # i_min is the insertion index
    # i is the index being tested (i.e. "i_min" - 1)
i_min = 1
i = 1
while i + 1 < n:
p = G[C[i]][x] + G[x][C[i + 1]]
if p < p_min:
p_min = p
i_min = i + 1
i += 1
C.insert(i_min, x)
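# NB: InsererAuMieux minimises only the length of the two new edges
# G[C[i]][x] + G[x][C[i + 1]]; the textbook "cheapest insertion" cost would also
# subtract the edge G[C[i]][C[i + 1]] that the insertion removes.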
# If list.pop(i) is not allowed
# Will not be fixed:
# >>> L = list(range(10))
# >>> RetirerParIndice(L, -1)
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def RetirerParIndice(L, i):
return L[:i] + L[i + 1:]
# And in place (using list.pop() and list.append())
# This implementation does not support negative indices
def RetirerParIndice_1(L, i):
n = len(L) - 1 - i
if n < 0:
return
R = []
for i in range(n):
if L:
R.append(L.pop())
if L:
L.pop()
while R:
L.append(R.pop())
def RetirerParIndice_2(L, i):
n = len(L)
if i < -n:
i = 0
elif -n <= i < 0:
i += n
for j in range(i, n - 1):
L[j] = L[j + 1]
L.pop()
def Glouton_2(X, Y):
n = len(X)
if n < 3:
return
C = []
R = list(range(n))
for i in range(3):
p = randint(0, n - 1 - i)
C.append(R[p])
        R.pop(p)  # or `del R[p]`
C.append(C[0])
G = mat_adjacence(X, Y)
for x in R:
InsererAuMieux(G, x, C)
DessinerChemin(X, Y, C)
return Poids(G, C)
def test9():
X, Y = Nuage(50)
return Glouton_2(X, Y)
# And with animation!
def Glouton_2_1(X, Y):
n = len(X)
if n < 3:
return
C = []
R = list(range(n))
for i in range(3):
p = randint(0, n - 1 - i)
C.append(R[p])
        R.pop(p)  # or `del R[p]`
C.append(C[0])
DessinerChemin_1(X, Y, C)
show()
G = mat_adjacence(X, Y)
for x in R:
InsererAuMieux(G, x, C)
DessinerChemin_1(X, Y, C)
return Poids(G, C)
def test10():
X, Y = Nuage(50)
return Glouton_2_1(X, Y)
# Returns an insertion index and a weight
def InsererAuMieux_2(G, x, C):
n = len(C)
if n == 0:
C.append(x)
return
if n == 1:
C.append(x)
        C.append(C[0])  # close the circuit back to its start (assumed intent; the original bare append() lacked an argument)
return
p_min = G[C[0]][x] + G[x][C[1]]
    # i_min is the insertion index
    # i is the index being tested (i.e. "i_min" - 1)
i_min = 1
i = 1
while i + 1 < n:
p = G[C[i]][x] + G[x][C[i + 1]]
if p < p_min:
p_min = p
i_min = i + 1
i += 1
return i_min, p_min
# What if we want a (roughly) convex shape?
# Take the point closest to the circuit and insert it
def Glouton_2_2(X, Y):
n = len(X)
if n < 3:
return
C = []
R = list(range(n))
for i in range(3):
p = randint(0, n - 1 - i)
C.append(R[p])
R.pop(p)
C.append(C[0])
G = mat_adjacence(X, Y)
while R:
nb_reste = len(R)
insertion_min, poids_min = InsererAuMieux_2(G, R[0], C)
x_min = R[0]
i_min = 0
for i in range(1, nb_reste):
insertion_x, poids_x = InsererAuMieux_2(G, R[i], C)
if poids_x < poids_min:
insertion_min = insertion_x
poids_min = poids_x
x_min = R[i]
i_min = i
C.insert(insertion_min, x_min)
R.pop(i_min)
DessinerChemin(X, Y, C)
return Poids(G, C), C
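# NB: at each step Glouton_2_2 scans every remaining point, evaluates its best
# insertion position (InsererAuMieux_2 is O(n)) and inserts the cheapest one,
# so building the whole tour costs roughly O(n^3).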
def test11():
X, Y = Nuage(30)
return Glouton_2_2(X, Y)
# And with animation! Again!
def Glouton_2_2_1(X, Y):
n = len(X)
if n < 3:
return
C = []
R = list(range(n))
for i in range(3):
p = randint(0, n - 1 - i)
C.append(R[p])
R.pop(p)
C.append(C[0])
DessinerChemin_1(X, Y, C)
show()
pause(0.5)
G = mat_adjacence(X, Y)
while R:
nb_reste = len(R)
insertion_min, poids_min = InsererAuMieux_2(G, R[0], C)
x_min = R[0]
i_min = 0
for i in range(1, nb_reste):
insertion_x, poids_x = InsererAuMieux_2(G, R[i], C)
if poids_x < poids_min:
insertion_min = insertion_x
poids_min = poids_x
x_min = R[i]
i_min = i
C.insert(insertion_min, x_min)
DessinerChemin_1(X, Y, C)
pause(0.5)
R.pop(i_min)
return Poids(G, C)
def test12():
X, Y = Nuage(30)
return Glouton_2_2_1(X, Y)
# Here we would only need to check that the values are increasing, since it is a
# triangle... if this were not a generic function
def EstCombi(T, n):
vu = [False] * n
for i in T:
if not vu[i]:
vu[i] = True
else:
return False
return True
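# CombiSuivant enumerates the tuples of pairwise-distinct values taken in
# {0, ..., n-1}, in lexicographic order, by counting in base n and skipping the
# tuples that contain repeats. For instance (a sketch, not from the original
# file): CombiSuivant([0, 1, 2], 3) -> [0, 2, 1], and CombiSuivant([2, 1, 0], 3)
# -> None because [2, 1, 0] is the last such triple.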
def CombiSuivant(T, n):
def TripletSuivant(T, n):
l = len(T)
T[l-1] += 1
i = l - 1
while not i < 0 and not T[i] < n:
T[i] %= n
i -= 1
T[i] += 1
if i < 0:
return None
return T
T = TripletSuivant(T, n)
if T is None:
return None
while not EstCombi(T, n):
T = TripletSuivant(T, n)
if T is None:
return None
return T
def CombiSuivant_1(T, n):
lt = len(T)
if not lt > 0:
return None
    # Checks whether T is a combination and, if not, returns
    # the index of the first error. Also partially fills `pos`
def EstCombiErreur(T, pos):
lt = len(T)
n = len(pos)
i = 0
i_erreur = None
while i < lt and i_erreur is None:
if not T[i] < n:
i_erreur = i
elif T[i] < 0:
i_erreur = i
elif pos[T[i]] is not None:
i_erreur = i
else:
pos[T[i]] = i
i += 1
return i_erreur
def ComplSuiv(T, pos, i_correct):
        # Fill in the remaining elements of T (in increasing order)
        i = i_correct + 1
        j = 0  # Walks through `pos`; j is the value of the element
while i < lt:
while j < n and pos[j] is not None:
j += 1
if j == n:
raise Exception("Impossible (?), cf code")
# Pas assez d'éléments disponibles dans pos (plus de n élts)
# C'est impossible (?)
T[i] = j
# pos[j] = i
j += 1
i += 1
return T
    # Takes `pos` and `i_erreur` as initialised by `EstCombiErreur` and returns
    # the next (strictly greater, because of `i_erreur`) combination
def CombiSup(T, pos, i_erreur):
n = len(pos)
lt = len(T)
        # Try to make the beginning of the combination valid:
        # scan T from i_erreur down to 0
i = i_erreur
trouve = False
while i >= 0 and not trouve:
            # Look for a larger value
            # The element under consideration has no entry in `pos`
cur_val = T[i]
if cur_val >= n:
i -= 1
if i >= 0:
pos[T[i]] = None
elif cur_val < -1:
                # Because we always look for a strictly larger value
                T[i] = -1
            else:
                # We still look for a larger value (hence -1 rather than 0)
j = cur_val + 1
while j < n and pos[j] is not None:
j += 1
if j == n:
                    # Not found, move on to the next one
                    i -= 1
                    # The element under consideration must not keep a position!
if i >= 0:
pos[T[i]] = None
else:
nv_val = j
T[i] = j
pos[nv_val] = i
trouve = True
if i < 0:
            # No way to obtain a valid combination
return None
i_correct = i
return ComplSuiv(T, pos, i_correct)
    # NB: `pos` could be a list of booleans (?)
    # pos: position of each element in the combination
    pos = [None] * n
    # Look for the next one
T[-1] += 1
i_erreur = EstCombiErreur(T, pos)
    print(i_erreur)  # DEBUG
    if i_erreur is None:  # T (as modified) is a combination
return T
else: # i_erreur == 1..n-1
return CombiSup(T, pos, i_erreur)
def Glouton_2_3(X, Y):
def Glouton_2_2_alt(X, Y, C):
n = len(C)
if n < 3:
return
R = list(range(len(X)))
for i in range(3):
R.remove(C[i])
G = mat_adjacence(X, Y)
while R:
nb_reste = len(R)
insertion_min, poids_min = InsererAuMieux_2(G, R[0], C)
x_min = R[0]
i_min = 0
for i in range(1, nb_reste):
insertion_x, poids_x = InsererAuMieux_2(G, R[i], C)
if poids_x < poids_min:
insertion_min = insertion_x
poids_min = poids_x
x_min = R[i]
i_min = i
C.insert(insertion_min, x_min)
R.pop(i_min)
DessinerChemin(X, Y, C)
return Poids(G, C), C
T = [0] * 3
if not EstCombi(T, 3):
T = CombiSuivant(T, 3)
if T is None:
            # No combination!?
return None
T_min = list(T)
poids_min, C_min = Glouton_2_2_alt(X, Y, list(T))
T = CombiSuivant(T, 3)
while T is not None:
poids_i, C_i = Glouton_2_2_alt(X, Y, list(T))
if poids_i < poids_min:
poids_min = poids_i
C_min = C_i
T_min = list(T)
T = CombiSuivant(T, 3)
DessinerChemin(X, Y, C_min)
return T_min, poids_min, C_min
def test13():
X, Y = Nuage(30)
return Glouton_2_3(X, Y)
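# A minimal manual entry point (a sketch; it assumes the plotting and random
# helpers used throughout this file -- plot, scatter, show, random, randint,
# shuffle, ... -- were imported at the top of the file, which is not shown here).
if __name__ == "__main__":
    X, Y = Nuage(30)
    print("Glouton_1 (nearest neighbour):", Glouton_1(X, Y))
    print("Glouton_2 (insertion):", Glouton_2(X, Y))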
|
gpl-3.0
|
zorroblue/scikit-learn
|
sklearn/tests/test_dummy.py
|
8
|
18272
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_almost_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_almost_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_almost_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_almost_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_almost_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_almost_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
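    # class 0 carries weight 0.1 + 0.1 = 0.2 and class 1 carries 1.0 (total 1.2),
    # hence the expected weighted priors below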
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
def test_dummy_classifier_on_nan_value():
X = [[np.NaN]]
y = [1]
y_expected = [1]
clf = DummyClassifier()
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_equal(y_pred, y_expected)
def test_dummy_regressor_on_nan_value():
X = [[np.NaN]]
y = [1]
y_expected = [1]
clf = DummyRegressor()
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_equal(y_pred, y_expected)
|
bsd-3-clause
|
ishank08/scikit-learn
|
examples/linear_model/lasso_dense_vs_sparse_data.py
|
348
|
1862
|
"""
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
|
bsd-3-clause
|
DarkEnergyScienceCollaboration/Twinkles
|
tests/test_opSimOrdering.py
|
2
|
3470
|
from __future__ import absolute_import
import os
import numpy as np
import pandas as pd
from desc.twinkles import OpSimOrdering
from lsst.utils import getPackageDir
import unittest
class TestOpSimOrdering(unittest.TestCase):
def setUp(self):
twinklesdir = getPackageDir('Twinkles')
self.opSimDBPath = os.path.join(twinklesdir, 'data',
'enigma_1189_micro.db')
self.ops = OpSimOrdering(self.opSimDBPath, timeMax=100., randomForestPickle=None)
self.numRecords = len(self.ops.filteredOpSim)
self.numUniqueRecords = len(self.ops.uniqueOpSimRecords)
def test_numRecords(self):
"""
        Given the setup where no records are dropped due to predictedPhoSimTime
        being too long, show that filtering the OpSim records (in favor of the
        lower propID belonging to the OpSim WFD proposals) keeps the same number
        of records as simply dropping duplicates.
"""
pts = self.ops.fullOpSimDF(self.opSimDBPath)
pts.drop_duplicates(subset='obsHistID', inplace=True)
self.assertEqual(len(pts), self.numUniqueRecords)
self.assertGreaterEqual(self.numUniqueRecords, self.numRecords)
def test_conservedNumRecordsInSplit(self):
"""
Check that all of the records in the filtered OpSim are in one of the
three splits by first checking that the sum of numbers of records in
each split matches the number of records in the filtered OpSim
"""
n1 = self.ops.Twinkles_3p1.obsHistID.size
n2 = self.ops.Twinkles_3p2.obsHistID.size
n3 = self.ops.Twinkles_3p3.obsHistID.size
self.assertEqual(n1 + n2 + n3, self.numRecords)
def test_uniqueObsHistIDs(self):
"""
Check that when all the obsHistIDs in the splits are combined
together they are all unique. This completes the check that all of
the records in the filtered OpSim are in one of the splits and also
that no pointing is in two splits
"""
v1 = self.ops.Twinkles_3p1.obsHistID.values.tolist()
v2 = self.ops.Twinkles_3p2.obsHistID.values.tolist()
v3 = self.ops.Twinkles_3p3.obsHistID.values.tolist()
arr = np.array(v1 + v2 + v3)
self.assertEqual(np.unique(arr).size, self.numRecords)
def test_Twink_3p1_uniqueCombinations(self):
"""
By the definition of the process, Twink_3p_1 should have one
and only one record representing each each unique combination
"""
v1 = self.ops.Twinkles_3p1.obsHistID.values.tolist()
df = self.ops.filteredOpSim.set_index('obsHistID').ix[v1]
# number of members in each unique combination of night and filter
nums = df.groupby(['night', 'filter']).expMJD.count().unique().tolist()
self.assertEqual(len(nums), 1)
self.assertEqual(nums[0], 1)
def test_Twink_3p1_allUniqueCombinations(self):
"""
Check that Twink_3p_1 includes a record for each unique combination in
filteredOpSim
"""
v1 = self.ops.Twinkles_3p1.obsHistID.values.tolist()
df = self.ops.filteredOpSim.set_index('obsHistID').ix[v1]
Twink_3p1_Groups = df.groupby(['night', 'filter']).groups.keys()
Orig_groups = self.ops.filteredOpSim.groupby(['night', 'filter']).groups.keys()
self.assertEqual(len(Twink_3p1_Groups), len(Orig_groups))
if __name__ == '__main__':
unittest.main()
|
mit
|
massmutual/scikit-learn
|
sklearn/cluster/tests/test_hierarchical.py
|
230
|
19795
|
"""
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity (manhattan_distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
        # With a specified number of clusters, just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that a ValueError is raised when the connectivity matrix
        # has the wrong shape
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Non-regression check for a bug triggered when a connectivity that does not
    # support item assignment is provided with more than one connected component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
|
bsd-3-clause
|
webmasterraj/FogOrNot
|
flask/lib/python2.7/site-packages/pandas/io/tests/test_excel.py
|
2
|
60224
|
# pylint: disable=E1101
from pandas.compat import u, range, map, openpyxl_compat
from datetime import datetime, date, time
import sys
import os
import platform  # used in test_read_from_file_url's skip message
from distutils.version import LooseVersion
import operator
import functools
import nose
from numpy import nan
import numpy as np
from numpy.testing.decorators import slow
from pandas import DataFrame, Index, MultiIndex
from pandas.io.parsers import read_csv
from pandas.io.excel import (
ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _Openpyxl1Writer,
_Openpyxl2Writer, register_writer, _XlsxWriter
)
from pandas.io.common import URLError
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option, get_option
import pandas.util.testing as tm
import pandas as pd
def _skip_if_no_xlrd():
try:
import xlrd
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9):
raise nose.SkipTest('xlrd < 0.9, skipping')
except ImportError:
raise nose.SkipTest('xlrd not installed, skipping')
def _skip_if_no_xlwt():
try:
import xlwt # NOQA
except ImportError:
raise nose.SkipTest('xlwt not installed, skipping')
def _skip_if_no_openpyxl():
try:
import openpyxl # NOQA
except ImportError:
raise nose.SkipTest('openpyxl not installed, skipping')
def _skip_if_no_xlsxwriter():
try:
import xlsxwriter # NOQA
except ImportError:
raise nose.SkipTest('xlsxwriter not installed, skipping')
def _skip_if_no_excelsuite():
_skip_if_no_xlrd()
_skip_if_no_xlwt()
_skip_if_no_openpyxl()
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
_tsframe = tm.makeTimeDataFrame()[:5]
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class SharedItems(object):
def setUp(self):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
self.xlsx1 = os.path.join(self.dirpath, 'test.xlsx')
self.multisheet = os.path.join(self.dirpath, 'test_multisheet.xlsx')
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
class ExcelReaderTests(SharedItems, tm.TestCase):
def test_parse_cols_int(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['xls', 'xlsx', 'xlsm']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=3)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols=3)
# TODO add index to xls file)
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_list(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['xls', 'xlsx', 'xlsm']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=[0, 2, 3])
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols=[0, 2, 3])
# TODO add index to xls file)
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_str(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['xls', 'xlsx', 'xlsm']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols='A:D')
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C,D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C,D')
# TODO add index to xls file
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C:D')
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_excel_stop_iterator(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xls'))
parsed = excel_data.parse('Sheet1')
expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test3.xls'))
parsed = excel_data.parse('Sheet1')
expected = DataFrame([[np.nan]], columns=['Test'])
tm.assert_frame_equal(parsed, expected)
def test_excel_passes_na(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xlsx'))
parsed = excel_data.parse('Sheet1', keep_default_na=False,
na_values=['apple'])
expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
parsed = excel_data.parse('Sheet1', keep_default_na=True,
na_values=['apple'])
expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
def check_excel_table_sheet_by_index(self, filename, csvfile):
import xlrd
pth = os.path.join(self.dirpath, filename)
xls = ExcelFile(pth)
df = xls.parse(0, index_col=0, parse_dates=True)
df2 = self.read_csv(csvfile, index_col=0, parse_dates=True)
df3 = xls.parse(1, skiprows=[1], index_col=0, parse_dates=True)
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xls.parse(0, index_col=0, parse_dates=True, skipfooter=1)
df5 = xls.parse(0, index_col=0, parse_dates=True, skip_footer=1)
tm.assert_frame_equal(df4, df.ix[:-1])
tm.assert_frame_equal(df4, df5)
self.assertRaises(xlrd.XLRDError, xls.parse, 'asdf')
def test_excel_table_sheet_by_index(self):
_skip_if_no_xlrd()
for filename, csvfile in [(self.xls1, self.csv1),
(self.xlsx1, self.csv1)]:
self.check_excel_table_sheet_by_index(filename, csvfile)
def test_excel_table(self):
_skip_if_no_xlrd()
pth = os.path.join(self.dirpath, 'test.xls')
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True)
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xls.parse('Sheet1', index_col=0, parse_dates=True,
skipfooter=1)
df5 = xls.parse('Sheet1', index_col=0, parse_dates=True,
skip_footer=1)
tm.assert_frame_equal(df4, df.ix[:-1])
tm.assert_frame_equal(df4, df5)
def test_excel_read_buffer(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xls')
f = open(pth, 'rb')
xls = ExcelFile(f)
# it works
xls.parse('Sheet1', index_col=0, parse_dates=True)
pth = os.path.join(self.dirpath, 'test.xlsx')
f = open(pth, 'rb')
xl = ExcelFile(f)
xl.parse('Sheet1', index_col=0, parse_dates=True)
def test_read_xlrd_Book(self):
_skip_if_no_xlrd()
_skip_if_no_xlwt()
import xlrd
df = self.frame
with ensure_clean('.xls') as pth:
df.to_excel(pth, "SheetA")
book = xlrd.open_workbook(pth)
with ExcelFile(book, engine="xlrd") as xl:
result = xl.parse("SheetA")
tm.assert_frame_equal(df, result)
result = read_excel(book, sheetname="SheetA", engine="xlrd")
tm.assert_frame_equal(df, result)
@tm.network
def test_read_from_http_url(self):
_skip_if_no_xlrd()
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/test.xlsx')
url_table = read_excel(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'test.xlsx')
local_table = read_excel(localtable)
tm.assert_frame_equal(url_table, local_table)
@slow
def test_read_from_file_url(self):
_skip_if_no_xlrd()
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'test.xlsx')
local_table = read_excel(localtable)
try:
url_table = read_excel('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_xlsx_table(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xlsx')
xlsx = ExcelFile(pth)
df = xlsx.parse('Sheet1', index_col=0, parse_dates=True)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df3 = xlsx.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True)
# TODO add index to xlsx file
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xlsx.parse('Sheet1', index_col=0, parse_dates=True,
skipfooter=1)
df5 = xlsx.parse('Sheet1', index_col=0, parse_dates=True,
skip_footer=1)
tm.assert_frame_equal(df4, df.ix[:-1])
tm.assert_frame_equal(df4, df5)
def test_reader_closes_file(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xlsx')
f = open(pth, 'rb')
with ExcelFile(f) as xlsx:
# parses okay
xlsx.parse('Sheet1', index_col=0)
self.assertTrue(f.closed)
def test_reader_special_dtypes(self):
_skip_if_no_xlrd()
expected = DataFrame.from_items([
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
("StrCol", [1, 2, 3, 4, 5]),
# GH5394 - this is why convert_float isn't vectorized
("Str2Col", ["a", 3, "c", "d", "e"]),
("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
datetime(1905, 1, 1), datetime(2013, 12, 14),
datetime(2015, 3, 14)])
])
xlsx_path = os.path.join(self.dirpath, 'test_types.xlsx')
xls_path = os.path.join(self.dirpath, 'test_types.xls')
# should read in correctly and infer types
for path in (xls_path, xlsx_path):
actual = read_excel(path, 'Sheet1')
tm.assert_frame_equal(actual, expected)
# if not coercing number, then int comes in as float
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[1, "Str2Col"] = 3.0
for path in (xls_path, xlsx_path):
actual = read_excel(path, 'Sheet1', convert_float=False)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = read_excel(xlsx_path, 'Sheet1', index_col=icol)
actual2 = read_excel(xlsx_path, 'Sheet1', index_col=name)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
tm.assert_frame_equal(actual2, exp)
# convert_float and converters should be different but both accepted
expected["StrCol"] = expected["StrCol"].apply(str)
actual = read_excel(xlsx_path, 'Sheet1', converters={"StrCol": str})
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
actual = read_excel(xlsx_path, 'Sheet1', converters={"StrCol": str},
convert_float=False)
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self):
_skip_if_no_xlrd()
expected = DataFrame.from_items([
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']),
("StrCol", ['1', np.nan, '3', '4', '5']),
])
converters = {'IntCol': lambda x: int(x) if x != '' else -1000,
'FloatCol': lambda x: 10 * x if x else np.nan,
2: lambda x: 'Found' if x != '' else 'Not found',
3: lambda x: str(x) if x else '',
}
xlsx_path = os.path.join(self.dirpath, 'test_converters.xlsx')
xls_path = os.path.join(self.dirpath, 'test_converters.xls')
# should read in correctly and set types of single cells (not array dtypes)
for path in (xls_path, xlsx_path):
actual = read_excel(path, 'Sheet1', converters=converters)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
_skip_if_no_xlrd()
dfs = read_excel(self.multisheet,sheetname=None)
expected_keys = ['Alpha','Beta','Charlie']
tm.assert_contains_all(expected_keys,dfs.keys())
def test_reading_multiple_specific_sheets(self):
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
_skip_if_no_xlrd()
#Explicitly request duplicates. Only the set should be returned.
expected_keys = [2,'Charlie','Charlie']
dfs = read_excel(self.multisheet,sheetname=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys,dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_creating_and_reading_multiple_sheets(self):
# Test reading multiple sheets, from a runtime created excel file
# with multiple sheets.
# See PR #9450
_skip_if_no_xlrd()
_skip_if_no_xlwt()
def tdf(sheetname):
d, i = [11,22,33], [1,2,3]
return DataFrame(d,i,columns=[sheetname])
sheets = ['AAA','BBB','CCC']
dfs = [tdf(s) for s in sheets]
dfs = dict(zip(sheets,dfs))
with ensure_clean('.xlsx') as pth:
with ExcelWriter(pth) as ew:
for sheetname, df in dfs.iteritems():
df.to_excel(ew,sheetname)
dfs_returned = pd.read_excel(pth,sheetname=sheets)
for s in sheets:
tm.assert_frame_equal(dfs[s],dfs_returned[s])
def test_reader_seconds(self):
# Test reading times with and without milliseconds. GH5945.
_skip_if_no_xlrd()
import xlrd
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
# Xlrd >= 0.9.3 can handle Excel milliseconds.
expected = DataFrame.from_items([("Time",
[time(1, 2, 3),
time(2, 45, 56, 100000),
time(4, 29, 49, 200000),
time(6, 13, 42, 300000),
time(7, 57, 35, 400000),
time(9, 41, 28, 500000),
time(11, 25, 21, 600000),
time(13, 9, 14, 700000),
time(14, 53, 7, 800000),
time(16, 37, 0, 900000),
time(18, 20, 54)])])
else:
# Xlrd < 0.9.3 rounds Excel milliseconds.
expected = DataFrame.from_items([("Time",
[time(1, 2, 3),
time(2, 45, 56),
time(4, 29, 49),
time(6, 13, 42),
time(7, 57, 35),
time(9, 41, 29),
time(11, 25, 22),
time(13, 9, 15),
time(14, 53, 8),
time(16, 37, 1),
time(18, 20, 54)])])
epoch_1900 = os.path.join(self.dirpath, 'times_1900.xls')
epoch_1904 = os.path.join(self.dirpath, 'times_1904.xls')
actual = read_excel(epoch_1900, 'Sheet1')
tm.assert_frame_equal(actual, expected)
actual = read_excel(epoch_1904, 'Sheet1')
tm.assert_frame_equal(actual, expected)
class ExcelWriterBase(SharedItems):
# Base class for test cases to run with different Excel writers.
# To add a writer test, define the following:
# 1. A check_skip function that skips your tests if your writer isn't
# installed.
# 2. Add a property ext, which is the file extension that your writer
# writes to. (needs to start with '.' so it's a valid path)
# 3. Add a property engine_name, which is the name of the writer class.
# Test with MultiIndex and Hierarchical Rows as merged cells.
merge_cells = True
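# A minimal sketch of such a subclass (hypothetical names: the 'mywriter'
# engine and _skip_if_no_mywriter are illustrative only, not part of pandas):
#
#     class MyWriterTests(ExcelWriterBase, tm.TestCase):
#         ext = '.xlsx'
#         engine_name = 'mywriter'
#         check_skip = staticmethod(_skip_if_no_mywriter)
#
# The XlwtTests / XlsxWriterTests classes defined further below follow
# exactly this pattern.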
def setUp(self):
self.check_skip()
super(ExcelWriterBase, self).setUp()
self.option_name = 'io.excel.%s.writer' % self.ext.strip('.')
self.prev_engine = get_option(self.option_name)
set_option(self.option_name, self.engine_name)
def tearDown(self):
set_option(self.option_name, self.prev_engine)
def test_excel_sheet_by_name_raise(self):
_skip_if_no_xlrd()
import xlrd
with ensure_clean(self.ext) as pth:
gt = DataFrame(np.random.randn(10, 2))
gt.to_excel(pth)
xl = ExcelFile(pth)
df = xl.parse(0)
tm.assert_frame_equal(gt, df)
self.assertRaises(xlrd.XLRDError, xl.parse, '0')
def test_excelwriter_contextmanager(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as pth:
with ExcelWriter(pth) as writer:
self.frame.to_excel(writer, 'Data1')
self.frame2.to_excel(writer, 'Data2')
with ExcelFile(pth) as reader:
found_df = reader.parse('Data1')
found_df2 = reader.parse('Data2')
tm.assert_frame_equal(found_df, self.frame)
tm.assert_frame_equal(found_df2, self.frame2)
def test_roundtrip(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', columns=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# test roundtrip
self.frame.to_excel(path, 'test1')
recons = read_excel(path, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', index=False)
recons = read_excel(path, 'test1', index_col=None)
recons.index = self.frame.index
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', na_rep='NA')
recons = read_excel(path, 'test1', index_col=0, na_values=['NA'])
tm.assert_frame_equal(self.frame, recons)
# GH 3611
self.frame.to_excel(path, 'test1', na_rep='88')
recons = read_excel(path, 'test1', index_col=0, na_values=['88'])
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', na_rep='88')
recons = read_excel(path, 'test1', index_col=0,
na_values=[88, 88.0])
tm.assert_frame_equal(self.frame, recons)
# GH 6573
self.frame.to_excel(path, 'Sheet1')
recons = read_excel(path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, '0')
recons = read_excel(path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
def test_mixed(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.mixed_frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=0)
tm.assert_frame_equal(self.mixed_frame, recons)
def test_tsframe(self):
_skip_if_no_xlrd()
df = tm.makeTimeDataFrame()[:5]
with ensure_clean(self.ext) as path:
df.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1')
tm.assert_frame_equal(df, recons)
def test_basics_with_nan(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', columns=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
def test_int_types(self):
_skip_if_no_xlrd()
for np_type in (np.int8, np.int16, np.int32, np.int64):
with ensure_clean(self.ext) as path:
# Test np.int values read come back as int (rather than float
# which is Excel's format).
frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)),
dtype=np_type)
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1')
int_frame = frame.astype(np.int64)
tm.assert_frame_equal(int_frame, recons)
recons2 = read_excel(path, 'test1')
tm.assert_frame_equal(int_frame, recons2)
# test with convert_float=False comes back as float
float_frame = frame.astype(float)
recons = read_excel(path, 'test1', convert_float=False)
tm.assert_frame_equal(recons, float_frame)
def test_float_types(self):
_skip_if_no_xlrd()
for np_type in (np.float16, np.float32, np.float64):
with ensure_clean(self.ext) as path:
# Test np.float values read come back as float.
frame = DataFrame(np.random.random_sample(10), dtype=np_type)
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1').astype(np_type)
tm.assert_frame_equal(frame, recons, check_dtype=False)
def test_bool_types(self):
_skip_if_no_xlrd()
for np_type in (np.bool8, np.bool_):
with ensure_clean(self.ext) as path:
# Test np.bool values read come back as bool (not float).
frame = (DataFrame([1, 0, True, False], dtype=np_type))
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1').astype(np_type)
tm.assert_frame_equal(frame, recons)
def test_inf_roundtrip(self):
_skip_if_no_xlrd()
frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
with ensure_clean(self.ext) as path:
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1')
tm.assert_frame_equal(frame, recons)
def test_sheets(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', columns=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# Test writing to separate sheets
writer = ExcelWriter(path)
self.frame.to_excel(writer, 'test1')
self.tsframe.to_excel(writer, 'test2')
writer.save()
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
recons = reader.parse('test2', index_col=0)
tm.assert_frame_equal(self.tsframe, recons)
np.testing.assert_equal(2, len(reader.sheet_names))
np.testing.assert_equal('test1', reader.sheet_names[0])
np.testing.assert_equal('test2', reader.sheet_names[1])
def test_colaliases(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', columns=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_excel(path, 'test1', header=col_aliases)
reader = ExcelFile(path)
rs = reader.parse('test1', index_col=0)
xp = self.frame2.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
def test_roundtrip_indexlabels(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', columns=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# test index_label
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(path, 'test1',
index_label=['test'],
merge_cells=self.merge_cells)
reader = ExcelFile(path)
recons = reader.parse('test1',
index_col=0,
has_index_names=self.merge_cells
).astype(np.int64)
frame.index.names = ['test']
self.assertEqual(frame.index.names, recons.index.names)
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(path,
'test1',
index_label=['test', 'dummy', 'dummy2'],
merge_cells=self.merge_cells)
reader = ExcelFile(path)
recons = reader.parse('test1',
index_col=0,
has_index_names=self.merge_cells
).astype(np.int64)
frame.index.names = ['test']
self.assertEqual(frame.index.names, recons.index.names)
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(path,
'test1',
index_label='test',
merge_cells=self.merge_cells)
reader = ExcelFile(path)
recons = reader.parse('test1',
index_col=0,
has_index_names=self.merge_cells
).astype(np.int64)
frame.index.names = ['test']
tm.assert_frame_equal(frame, recons.astype(bool))
with ensure_clean(self.ext) as path:
self.frame.to_excel(path,
'test1',
columns=['A', 'B', 'C', 'D'],
index=False, merge_cells=self.merge_cells)
# take 'A' and 'B' as indexes (same row as cols 'C', 'D')
df = self.frame.copy()
df = df.set_index(['A', 'B'])
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=[0, 1])
tm.assert_frame_equal(df, recons, check_less_precise=True)
def test_excel_roundtrip_indexname(self):
_skip_if_no_xlrd()
df = DataFrame(np.random.randn(10, 4))
df.index.name = 'foo'
with ensure_clean(self.ext) as path:
df.to_excel(path, merge_cells=self.merge_cells)
xf = ExcelFile(path)
result = xf.parse(xf.sheet_names[0],
index_col=0,
has_index_names=self.merge_cells)
tm.assert_frame_equal(result, df)
self.assertEqual(result.index.name, 'foo')
def test_excel_roundtrip_datetime(self):
_skip_if_no_xlrd()
# datetime.date, not sure what to test here exactly
tsf = self.tsframe.copy()
with ensure_clean(self.ext) as path:
tsf.index = [x.date() for x in self.tsframe.index]
tsf.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
recons = reader.parse('test1')
tm.assert_frame_equal(self.tsframe, recons)
# GH4133 - excel output format strings
def test_excel_date_datetime_format(self):
_skip_if_no_xlrd()
df = DataFrame([[date(2014, 1, 31),
date(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=['DATE', 'DATETIME'], columns=['X', 'Y'])
df_expected = DataFrame([[datetime(2014, 1, 31),
datetime(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=['DATE', 'DATETIME'], columns=['X', 'Y'])
with ensure_clean(self.ext) as filename1:
with ensure_clean(self.ext) as filename2:
writer1 = ExcelWriter(filename1)
writer2 = ExcelWriter(filename2,
date_format='DD.MM.YYYY',
datetime_format='DD.MM.YYYY HH-MM-SS')
df.to_excel(writer1, 'test1')
df.to_excel(writer2, 'test1')
writer1.close()
writer2.close()
reader1 = ExcelFile(filename1)
reader2 = ExcelFile(filename2)
rs1 = reader1.parse('test1', index_col=None)
rs2 = reader2.parse('test1', index_col=None)
tm.assert_frame_equal(rs1, rs2)
# since the reader returns a datetime object for dates, we need
# to use df_expected to check the result
tm.assert_frame_equal(rs2, df_expected)
def test_to_excel_periodindex(self):
_skip_if_no_xlrd()
frame = self.tsframe
xp = frame.resample('M', kind='period')
with ensure_clean(self.ext) as path:
xp.to_excel(path, 'sht1')
reader = ExcelFile(path)
rs = reader.parse('sht1', index_col=0, parse_dates=True)
tm.assert_frame_equal(xp, rs.to_period('M'))
def test_to_excel_multiindex(self):
_skip_if_no_xlrd()
frame = self.frame
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
with ensure_clean(self.ext) as path:
frame.to_excel(path, 'test1', header=False)
frame.to_excel(path, 'test1', columns=['A', 'B'])
# round trip
frame.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
df = reader.parse('test1', index_col=[0, 1],
parse_dates=False,
has_index_names=self.merge_cells)
tm.assert_frame_equal(frame, df)
self.assertEqual(frame.index.names, df.index.names)
def test_to_excel_multiindex_dates(self):
_skip_if_no_xlrd()
# try multiindex with dates
tsframe = self.tsframe.copy()
new_index = [tsframe.index, np.arange(len(tsframe.index))]
tsframe.index = MultiIndex.from_arrays(new_index)
with ensure_clean(self.ext) as path:
tsframe.index.names = ['time', 'foo']
tsframe.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
recons = reader.parse('test1',
index_col=[0, 1],
has_index_names=self.merge_cells)
tm.assert_frame_equal(tsframe, recons)
self.assertEqual(recons.index.names, ('time', 'foo'))
def test_to_excel_multiindex_no_write_index(self):
_skip_if_no_xlrd()
# Test writing and re-reading a MI without the index. GH 5616.
# Initial non-MI frame.
frame1 = pd.DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]})
# Add a MI.
frame2 = frame1.copy()
multi_index = pd.MultiIndex.from_tuples([(70, 80), (90, 100)])
frame2.index = multi_index
with ensure_clean(self.ext) as path:
# Write out to Excel without the index.
frame2.to_excel(path, 'test1', index=False)
# Read it back in.
reader = ExcelFile(path)
frame3 = reader.parse('test1')
# Test that it is the same as the initial frame.
tm.assert_frame_equal(frame1, frame3)
def test_to_excel_float_format(self):
_skip_if_no_xlrd()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean(self.ext) as filename:
df.to_excel(filename, 'test1', float_format='%.2f')
reader = ExcelFile(filename)
rs = reader.parse('test1', index_col=None)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
tm.assert_frame_equal(rs, xp)
def test_to_excel_output_encoding(self):
_skip_if_no_xlrd()
ext = self.ext
filename = '__tmp_to_excel_float_format__.' + ext
df = DataFrame([[u('\u0192'), u('\u0193'), u('\u0194')],
[u('\u0195'), u('\u0196'), u('\u0197')]],
index=[u('A\u0192'), 'B'], columns=[u('X\u0193'), 'Y', 'Z'])
with ensure_clean(filename) as filename:
df.to_excel(filename, sheet_name='TestSheet', encoding='utf8')
result = read_excel(filename, 'TestSheet', encoding='utf8')
tm.assert_frame_equal(result, df)
def test_to_excel_unicode_filename(self):
_skip_if_no_xlrd()
with ensure_clean(u('\u0192u.') + self.ext) as filename:
try:
f = open(filename, 'wb')
except UnicodeEncodeError:
raise nose.SkipTest('no unicode file names on this system')
else:
f.close()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
df.to_excel(filename, 'test1', float_format='%.2f')
reader = ExcelFile(filename)
rs = reader.parse('test1', index_col=None)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
tm.assert_frame_equal(rs, xp)
# def test_to_excel_header_styling_xls(self):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import xlwt
# import xlrd
# except ImportError:
# raise nose.SkipTest
# filename = '__tmp_to_excel_header_styling_xls__.xls'
# pdf.to_excel(filename, 'test1')
# wbk = xlrd.open_workbook(filename,
# formatting_info=True)
# self.assertEqual(["test1"], wbk.sheet_names())
# ws = wbk.sheet_by_name('test1')
# self.assertEqual([(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)],
# ws.merged_cells)
# for i in range(0, 2):
# for j in range(0, 7):
# xfx = ws.cell_xf_index(0, 0)
# cell_xf = wbk.xf_list[xfx]
# font = wbk.font_list
# self.assertEqual(1, font[cell_xf.font_index].bold)
# self.assertEqual(1, cell_xf.border.top_line_style)
# self.assertEqual(1, cell_xf.border.right_line_style)
# self.assertEqual(1, cell_xf.border.bottom_line_style)
# self.assertEqual(1, cell_xf.border.left_line_style)
# self.assertEqual(2, cell_xf.alignment.hor_align)
# os.remove(filename)
# def test_to_excel_header_styling_xlsx(self):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import openpyxl
# from openpyxl.cell import get_column_letter
# except ImportError:
# raise nose.SkipTest
# if openpyxl.__version__ < '1.6.1':
# raise nose.SkipTest
# # test xlsx_styling
# filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
# pdf.to_excel(filename, 'test1')
# wbk = openpyxl.load_workbook(filename)
# self.assertEqual(["test1"], wbk.get_sheet_names())
# ws = wbk.get_sheet_by_name('test1')
# xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
# xlsaddrs += ["A%s" % i for i in range(1, 6)]
# xlsaddrs += ["B1", "D1", "F1"]
# for xlsaddr in xlsaddrs:
# cell = ws.cell(xlsaddr)
# self.assertTrue(cell.style.font.bold)
# self.assertEqual(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.top.border_style)
# self.assertEqual(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.right.border_style)
# self.assertEqual(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.bottom.border_style)
# self.assertEqual(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.left.border_style)
# self.assertEqual(openpyxl.style.Alignment.HORIZONTAL_CENTER,
# cell.style.alignment.horizontal)
# mergedcells_addrs = ["C1", "E1", "G1"]
# for maddr in mergedcells_addrs:
# self.assertTrue(ws.cell(maddr).merged)
# os.remove(filename)
def test_excel_010_hemstring(self):
_skip_if_no_xlrd()
if self.merge_cells:
raise nose.SkipTest('Skip tests for merged MI format.')
from pandas.util.testing import makeCustomDataframe as mkdf
# ensure limited functionality in 0.10
# override of #2370 until sorted out in 0.11
def roundtrip(df, header=True, parser_hdr=0):
with ensure_clean(self.ext) as path:
df.to_excel(path, header=header, merge_cells=self.merge_cells)
xf = pd.ExcelFile(path)
res = xf.parse(xf.sheet_names[0], header=parser_hdr)
return res
nrows = 5
ncols = 3
for i in range(1, 4): # row multiindex up to nlevels=3
for j in range(1, 4): # col ""
df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j)
res = roundtrip(df)
# shape
self.assertEqual(res.shape, (nrows, ncols + i))
# no nans
for r in range(len(res.index)):
for c in range(len(res.columns)):
self.assertTrue(res.ix[r, c] is not np.nan)
for i in range(1, 4): # row multiindex up to nlevels=3
for j in range(1, 4): # col ""
df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j)
res = roundtrip(df, False)
# shape
self.assertEqual(res.shape, (
nrows - 1, ncols + i)) # first row taken as columns
# no nans
for r in range(len(res.index)):
for c in range(len(res.columns)):
self.assertTrue(res.ix[r, c] is not np.nan)
res = roundtrip(DataFrame([0]))
self.assertEqual(res.shape, (1, 1))
self.assertTrue(res.ix[0, 0] is not np.nan)
res = roundtrip(DataFrame([0]), False, None)
self.assertEqual(res.shape, (1, 2))
self.assertTrue(res.ix[0, 0] is not np.nan)
def test_duplicated_columns(self):
# Test for issue #5235.
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
write_frame = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
colnames = ['A', 'B', 'B']
write_frame.columns = colnames
write_frame.to_excel(path, 'test1')
read_frame = read_excel(path, 'test1')
read_frame.columns = colnames
tm.assert_frame_equal(write_frame, read_frame)
def test_swapped_columns(self):
# Test for issue #5427.
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
write_frame = DataFrame({'A': [1, 1, 1],
'B': [2, 2, 2]})
write_frame.to_excel(path, 'test1', columns=['B', 'A'])
read_frame = read_excel(path, 'test1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
tm.assert_series_equal(write_frame['B'], read_frame['B'])
def test_datetimes(self):
# Test writing and reading datetimes. For issue #9139. (xref #9185)
_skip_if_no_xlrd()
datetimes = [datetime(2013, 1, 13, 1, 2, 3),
datetime(2013, 1, 13, 2, 45, 56),
datetime(2013, 1, 13, 4, 29, 49),
datetime(2013, 1, 13, 6, 13, 42),
datetime(2013, 1, 13, 7, 57, 35),
datetime(2013, 1, 13, 9, 41, 28),
datetime(2013, 1, 13, 11, 25, 21),
datetime(2013, 1, 13, 13, 9, 14),
datetime(2013, 1, 13, 14, 53, 7),
datetime(2013, 1, 13, 16, 37, 0),
datetime(2013, 1, 13, 18, 20, 52)]
with ensure_clean(self.ext) as path:
write_frame = DataFrame.from_items([('A', datetimes)])
write_frame.to_excel(path, 'Sheet1')
read_frame = read_excel(path, 'Sheet1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
def raise_wrapper(major_ver):
def versioned_raise_wrapper(orig_method):
@functools.wraps(orig_method)
def wrapped(self, *args, **kwargs):
_skip_if_no_openpyxl()
if openpyxl_compat.is_compat(major_ver=major_ver):
orig_method(self, *args, **kwargs)
else:
msg = 'Installed openpyxl is not supported at this time\. Use.+'
with tm.assertRaisesRegexp(ValueError, msg):
orig_method(self, *args, **kwargs)
return wrapped
return versioned_raise_wrapper
def raise_on_incompat_version(major_ver):
def versioned_raise_on_incompat_version(cls):
methods = filter(operator.methodcaller('startswith', 'test_'), dir(cls))
for method in methods:
setattr(cls, method, raise_wrapper(major_ver)(getattr(cls, method)))
return cls
return versioned_raise_on_incompat_version
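# Applied as e.g. `@raise_on_incompat_version(1)` (see the classes just below):
# every `test_*` method is wrapped so it either runs normally or, when the
# installed openpyxl major version does not match, asserts that ValueError is
# raised instead of running the test body.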
@raise_on_incompat_version(1)
class OpenpyxlTests(ExcelWriterBase, tm.TestCase):
ext = '.xlsx'
engine_name = 'openpyxl1'
check_skip = staticmethod(lambda *args, **kwargs: None)
def test_to_excel_styleconverter(self):
_skip_if_no_openpyxl()
if not openpyxl_compat.is_compat(major_ver=1):
raise nose.SkipTest('incompatible openpyxl version')
import openpyxl
hstyle = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"}}
xlsx_style = _Openpyxl1Writer._convert_to_style(hstyle)
self.assertTrue(xlsx_style.font.bold)
self.assertEqual(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.top.border_style)
self.assertEqual(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.right.border_style)
self.assertEqual(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.bottom.border_style)
self.assertEqual(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.left.border_style)
self.assertEqual(openpyxl.style.Alignment.HORIZONTAL_CENTER,
xlsx_style.alignment.horizontal)
self.assertEqual(openpyxl.style.Alignment.VERTICAL_TOP,
xlsx_style.alignment.vertical)
@raise_on_incompat_version(2)
class Openpyxl2Tests(ExcelWriterBase, tm.TestCase):
ext = '.xlsx'
engine_name = 'openpyxl2'
check_skip = staticmethod(lambda *args, **kwargs: None)
def test_to_excel_styleconverter(self):
_skip_if_no_openpyxl()
if not openpyxl_compat.is_compat(major_ver=2):
raise nose.SkipTest('incompatible openpyxl version')
import openpyxl
from openpyxl import styles
hstyle = {
"font": {
"color": '00FF0000',
"bold": True,
},
"borders": {
"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin",
},
"alignment": {
"horizontal": "center",
"vertical": "top",
},
"fill": {
"patternType": 'solid',
'fgColor': {
'rgb': '006666FF',
'tint': 0.3,
},
},
"number_format": {
"format_code": "0.00"
},
"protection": {
"locked": True,
"hidden": False,
},
}
font_color = styles.Color('00FF0000')
font = styles.Font(bold=True, color=font_color)
side = styles.Side(style=styles.borders.BORDER_THIN)
border = styles.Border(top=side, right=side, bottom=side, left=side)
alignment = styles.Alignment(horizontal='center', vertical='top')
fill_color = styles.Color(rgb='006666FF', tint=0.3)
fill = styles.PatternFill(patternType='solid', fgColor=fill_color)
# ahh openpyxl API changes
ver = openpyxl.__version__
if ver >= LooseVersion('2.0.0') and ver < LooseVersion('2.1.0'):
number_format = styles.NumberFormat(format_code='0.00')
else:
number_format = '0.00' # XXX: Only works with openpyxl-2.1.0
protection = styles.Protection(locked=True, hidden=False)
kw = _Openpyxl2Writer._convert_to_style_kwargs(hstyle)
self.assertEqual(kw['font'], font)
self.assertEqual(kw['border'], border)
self.assertEqual(kw['alignment'], alignment)
self.assertEqual(kw['fill'], fill)
self.assertEqual(kw['number_format'], number_format)
self.assertEqual(kw['protection'], protection)
def test_write_cells_merge_styled(self):
_skip_if_no_openpyxl()
if not openpyxl_compat.is_compat(major_ver=2):
raise nose.SkipTest('incompatible openpyxl version')
from pandas.core.format import ExcelCell
from openpyxl import styles
sheet_name='merge_styled'
sty_b1 = {'font': {'color': '00FF0000'}}
sty_a2 = {'font': {'color': '0000FF00'}}
initial_cells = [
ExcelCell(col=1, row=0, val=42, style=sty_b1),
ExcelCell(col=0, row=1, val=99, style=sty_a2),
]
sty_merged = {'font': { 'color': '000000FF', 'bold': True }}
sty_kwargs = _Openpyxl2Writer._convert_to_style_kwargs(sty_merged)
openpyxl_sty_merged = styles.Style(**sty_kwargs)
merge_cells = [
ExcelCell(col=0, row=0, val='pandas',
mergestart=1, mergeend=1, style=sty_merged),
]
with ensure_clean('.xlsx') as path:
writer = _Openpyxl2Writer(path)
writer.write_cells(initial_cells, sheet_name=sheet_name)
writer.write_cells(merge_cells, sheet_name=sheet_name)
wks = writer.sheets[sheet_name]
xcell_b1 = wks.cell('B1')
xcell_a2 = wks.cell('A2')
self.assertEqual(xcell_b1.style, openpyxl_sty_merged)
self.assertEqual(xcell_a2.style, openpyxl_sty_merged)
class XlwtTests(ExcelWriterBase, tm.TestCase):
ext = '.xls'
engine_name = 'xlwt'
check_skip = staticmethod(_skip_if_no_xlwt)
def test_to_excel_styleconverter(self):
_skip_if_no_xlwt()
import xlwt
hstyle = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"}}
xls_style = _XlwtWriter._convert_to_style(hstyle)
self.assertTrue(xls_style.font.bold)
self.assertEqual(xlwt.Borders.THIN, xls_style.borders.top)
self.assertEqual(xlwt.Borders.THIN, xls_style.borders.right)
self.assertEqual(xlwt.Borders.THIN, xls_style.borders.bottom)
self.assertEqual(xlwt.Borders.THIN, xls_style.borders.left)
self.assertEqual(xlwt.Alignment.HORZ_CENTER, xls_style.alignment.horz)
self.assertEqual(xlwt.Alignment.VERT_TOP, xls_style.alignment.vert)
class XlsxWriterTests(ExcelWriterBase, tm.TestCase):
ext = '.xlsx'
engine_name = 'xlsxwriter'
check_skip = staticmethod(_skip_if_no_xlsxwriter)
def test_column_format(self):
# Test that column formats are applied to cells. Test for issue #9167.
# Applicable to xlsxwriter only.
_skip_if_no_xlsxwriter()
import warnings
with warnings.catch_warnings():
# Ignore the openpyxl lxml warning.
warnings.simplefilter("ignore")
_skip_if_no_openpyxl()
import openpyxl
with ensure_clean(self.ext) as path:
frame = DataFrame({'A': [123456, 123456],
'B': [123456, 123456]})
writer = ExcelWriter(path)
frame.to_excel(writer)
# Add a number format to col B and ensure it is applied to cells.
num_format = '#,##0'
write_workbook = writer.book
write_worksheet = write_workbook.worksheets()[0]
col_format = write_workbook.add_format({'num_format': num_format})
write_worksheet.set_column('B:B', None, col_format)
writer.save()
read_workbook = openpyxl.load_workbook(path)
read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1')
# Get the number format from the cell. This method is backward
# compatible with older versions of openpyxl.
cell = read_worksheet.cell('B2')
try:
read_num_format = cell.style.number_format._format_code
except:
read_num_format = cell.style.number_format
self.assertEqual(read_num_format, num_format)
class OpenpyxlTests_NoMerge(ExcelWriterBase, tm.TestCase):
ext = '.xlsx'
engine_name = 'openpyxl'
check_skip = staticmethod(_skip_if_no_openpyxl)
# Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows.
merge_cells = False
class XlwtTests_NoMerge(ExcelWriterBase, tm.TestCase):
ext = '.xls'
engine_name = 'xlwt'
check_skip = staticmethod(_skip_if_no_xlwt)
# Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows.
merge_cells = False
class XlsxWriterTests_NoMerge(ExcelWriterBase, tm.TestCase):
ext = '.xlsx'
engine_name = 'xlsxwriter'
check_skip = staticmethod(_skip_if_no_xlsxwriter)
# Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows.
merge_cells = False
class ExcelWriterEngineTests(tm.TestCase):
def test_ExcelWriter_dispatch(self):
with tm.assertRaisesRegexp(ValueError, 'No engine'):
ExcelWriter('nothing')
try:
import xlsxwriter
writer_klass = _XlsxWriter
except ImportError:
_skip_if_no_openpyxl()
if not openpyxl_compat.is_compat(major_ver=1):
raise nose.SkipTest('incompatible openpyxl version')
writer_klass = _Openpyxl1Writer
with ensure_clean('.xlsx') as path:
writer = ExcelWriter(path)
tm.assert_isinstance(writer, writer_klass)
_skip_if_no_xlwt()
with ensure_clean('.xls') as path:
writer = ExcelWriter(path)
tm.assert_isinstance(writer, _XlwtWriter)
def test_register_writer(self):
# some awkward mocking to test out dispatch and such actually works
called_save = []
called_write_cells = []
class DummyClass(ExcelWriter):
called_save = False
called_write_cells = False
supported_extensions = ['test', 'xlsx', 'xls']
engine = 'dummy'
def save(self):
called_save.append(True)
def write_cells(self, *args, **kwargs):
called_write_cells.append(True)
def check_called(func):
func()
self.assertTrue(len(called_save) >= 1)
self.assertTrue(len(called_write_cells) >= 1)
del called_save[:]
del called_write_cells[:]
register_writer(DummyClass)
writer = ExcelWriter('something.test')
tm.assert_isinstance(writer, DummyClass)
df = tm.makeCustomDataframe(1, 1)
panel = tm.makePanel()
func = lambda: df.to_excel('something.test')
check_called(func)
check_called(lambda: panel.to_excel('something.test'))
val = get_option('io.excel.xlsx.writer')
set_option('io.excel.xlsx.writer', 'dummy')
check_called(lambda: df.to_excel('something.xlsx'))
check_called(lambda: df.to_excel('something.xls', engine='dummy'))
set_option('io.excel.xlsx.writer', val)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
gpl-2.0
|
temmeand/scikit-rf
|
skrf/network.py
|
1
|
195045
|
# -*- coding: utf-8 -*-
"""
.. module:: skrf.network
========================================
network (:mod:`skrf.network`)
========================================
Provides an n-port network class and associated functions.
Much of the functionality in this module is provided as methods and
properties of the :class:`Network` Class.
Network Class
===============
.. autosummary::
:toctree: generated/
Network
Building Network
----------------
.. autosummary::
:toctree: generated/
Network.from_z
Network Representations
============================
.. autosummary::
:toctree: generated/
Network.s
Network.z
Network.y
Network.a
Network.t
Connecting Networks
===============================
.. autosummary::
:toctree: generated/
connect
innerconnect
cascade
cascade_list
de_embed
flip
Interpolation and Concatenation Along Frequency Axis
=====================================================
.. autosummary::
:toctree: generated/
stitch
overlap
Network.resample
Network.interpolate
Network.interpolate_self
Combining Networks
===================================
.. autosummary::
:toctree: generated/
n_oneports_2_nport
four_oneports_2_twoport
three_twoports_2_threeport
n_twoports_2_nport
concat_ports
IO
====
.. autosummary::
skrf.io.general.read
skrf.io.general.write
skrf.io.general.ntwk_2_spreadsheet
Network.write
Network.write_touchstone
Network.read
Network.write_spreadsheet
Noise
============
.. autosummary::
:toctree: generated/
Network.add_noise_polar
Network.add_noise_polar_flatband
Network.multiply_noise
Supporting Functions
======================
.. autosummary::
:toctree: generated/
inv
connect_s
innerconnect_s
s2z
s2y
s2t
s2a
z2s
z2y
z2t
z2a
y2s
y2z
y2t
t2s
t2z
t2y
fix_z0_shape
renormalize_s
passivity
reciprocity
Misc Functions
=====================
.. autosummary::
:toctree: generated/
average
two_port_reflect
chopinhalf
Network.nudge
Network.renormalize
"""
from six.moves import xrange
from functools import reduce
import os
import warnings
import six.moves.cPickle as pickle
from six.moves.cPickle import UnpicklingError
import sys
import re
import zipfile
from copy import deepcopy as copy
from numbers import Number
from itertools import product
import numpy as npy
from numpy.linalg import inv as npy_inv
from numpy import fft, gradient, reshape, shape, ones
from scipy import stats, signal # for Network.add_noise_*, and Network.windowed
from scipy.interpolate import interp1d # for Network.interpolate()
from scipy.ndimage.filters import convolve1d
import unittest # for unittest.skip
from . import mathFunctions as mf
from .frequency import Frequency
from .tlineFunctions import zl_2_Gamma0
from .util import get_fid, get_extn, find_nearest_index, slice_domain
from .time import time_gate
# later imports. delayed to solve circular dependencies
# from .io.general import read, write
# from .io import touchstone
# from .io.general import network_2_spreadsheet
# from media import Freespace
from .constants import ZERO, K_BOLTZMANN, T0
from .constants import S_DEFINITIONS, S_DEF_DEFAULT
#from matplotlib import cm
#import matplotlib.pyplot as plt
#import matplotlib.tri as tri
#from scipy.interpolate import interp1d
class Network(object):
"""
An n-port electrical network [#]_.
For instructions on how to create a Network see :func:`__init__`.
An n-port network may be defined by three quantities,
* network parameter matrix (s, z, or y-matrix)
* port characteristic impedance matrix
* frequency information
The :class:`Network` class stores these data structures internally
in the form of complex :class:`numpy.ndarray`'s. These arrays are not
interfaced directly but instead through the use of the properties:
===================== =============================================
Property Meaning
===================== =============================================
:attr:`s` scattering parameter matrix
:attr:`z0` characteristic impedance matrix
:attr:`f` frequency vector
===================== =============================================
Although these docs focus on s-parameters, other equivalent network
representations such as :attr:`z` and :attr:`y` are
available. Scalar projections of the complex network parameters
are accessible through properties as well. These also return
:class:`numpy.ndarray`'s.
===================== =============================================
Property Meaning
===================== =============================================
:attr:`s_re` real part of the s-matrix
:attr:`s_im` imaginary part of the s-matrix
:attr:`s_mag` magnitude of the s-matrix
:attr:`s_db` magnitude in log scale of the s-matrix
:attr:`s_deg` phase of the s-matrix in degrees
===================== =============================================
The following operations act on the networks s-matrix.
===================== =============================================
Operator Function
===================== =============================================
\+ element-wise addition of the s-matrix
\- element-wise difference of the s-matrix
\* element-wise multiplication of the s-matrix
\/ element-wise division of the s-matrix
\*\* cascading (only for 2-ports)
\// de-embedding (for 2-ports, see :attr:`inv`)
===================== =============================================
Different components of the :class:`Network` can be visualized
through various plotting methods. These methods can be used to plot
individual elements of the s-matrix or all at once. For more info
about plotting see the :doc:`../../tutorials/plotting` tutorial.
========================= =============================================
Method Meaning
========================= =============================================
:func:`plot_s_smith` plot complex s-parameters on smith chart
:func:`plot_s_re` plot real part of s-parameters vs frequency
:func:`plot_s_im` plot imaginary part of s-parameters vs frequency
:func:`plot_s_mag` plot magnitude of s-parameters vs frequency
:func:`plot_s_db` plot magnitude (in dB) of s-parameters vs frequency
:func:`plot_s_deg` plot phase of s-parameters (in degrees) vs frequency
:func:`plot_s_deg_unwrap` plot phase of s-parameters (in unwrapped degrees) vs frequency
========================= =============================================
:class:`Network` objects can be created from a touchstone or pickle
file (see :func:`__init__`), by a
:class:`~skrf.media.media.Media` object, or manually by assigning the
network properties directly. :class:`Network` objects
can be saved to disk in the form of touchstone files with the
:func:`write_touchstone` method.
An exhaustive list of :class:`Network` Methods and Properties
(Attributes) are given below
References
------------
.. [#] http://en.wikipedia.org/wiki/Two-port_network
"""
global PRIMARY_PROPERTIES
PRIMARY_PROPERTIES = ['s', 'z', 'y', 'a', 'h']
global COMPONENT_FUNC_DICT
COMPONENT_FUNC_DICT = {
're': npy.real,
'im': npy.imag,
'mag': npy.abs,
'db': mf.complex_2_db,
'db10': mf.complex_2_db10,
'rad': npy.angle,
'deg': lambda x: npy.angle(x, deg=True),
'arcl': lambda x: npy.angle(x) * npy.abs(x),
'rad_unwrap': lambda x: mf.unwrap_rad(npy.angle(x)),
'deg_unwrap': lambda x: mf.radian_2_degree(mf.unwrap_rad( \
npy.angle(x))),
'arcl_unwrap': lambda x: mf.unwrap_rad(npy.angle(x)) * \
npy.abs(x),
# 'gd' : lambda x: -1 * npy.gradient(mf.unwrap_rad(npy.angle(x)))[0], # removed because it depends on `f` as well as `s`
'vswr': lambda x: (1 + abs(x)) / (1 - abs(x)),
'time': mf.ifft,
'time_db': lambda x: mf.complex_2_db(mf.ifft(x)),
'time_mag': lambda x: mf.complex_2_magnitude(mf.ifft(x)),
'time_impulse': None,
'time_step': None,
}
# provides y-axis labels to the plotting functions
global Y_LABEL_DICT
Y_LABEL_DICT = {
're': 'Real Part',
'im': 'Imag Part',
'mag': 'Magnitude',
'abs': 'Magnitude',
'db': 'Magnitude (dB)',
'db10': 'Magnitude (dB)',
'deg': 'Phase (deg)',
'deg_unwrap': 'Phase (deg)',
'rad': 'Phase (rad)',
'rad_unwrap': 'Phase (rad)',
'arcl': 'Arc Length',
'arcl_unwrap': 'Arc Length',
'gd': 'Group Delay (s)',
'vswr': 'VSWR',
'passivity': 'Passivity',
'reciprocity': 'Reciprocity',
'time': 'Time (real)',
'time_db': 'Magnitude (dB)',
'time_mag': 'Magnitude',
'time_impulse': 'Magnitude',
'time_step': 'Magnitude',
}
noise_interp_kind = 'linear'
# CONSTRUCTOR
def __init__(self, file=None, name=None, comments=None, f_unit=None, s_def=S_DEF_DEFAULT, **kwargs):
'''
Network constructor.
Creates an n-port microwave network from a `file` or directly
from data. If no file or data is given, then an empty Network
is created.
Parameters
------------
file : str or file-object
file to load information from. supported formats are:
* touchstone file (.s?p)
* pickled Network (.ntwk, .p) see :func:`write`
name : str
Name of this Network. If None, will try to use `file`, if
it's a str
comments : str
Comments associated with the Network
s_def : str, can be 'power', 'pseudo' or 'traveling'
Scattering parameter definition : 'power' for power-waves definition,
'pseudo' for pseudo-waves definition.
'traveling' corresponds to the initial implementation.
Default is 'power'.
NB: results are the same for real-valued characteristic impedances.
\*\*kwargs :
key word arguments can be used to assign properties of the
Network, such as `s`, `f` and `z0`.
Examples
------------
From a touchstone
>>> n = rf.Network('ntwk1.s2p')
From a pickle file
>>> n = rf.Network('ntwk1.ntwk')
Create a blank network, then fill in values
>>> n = rf.Network()
>>> freq = rf.Frequency(1,3,3,'ghz')
>>> n.frequency, n.s, n.z0 = freq,[1,2,3], [1,2,3]
Directly from values
>>> n = rf.Network(f=[1,2,3],s=[1,2,3],z0=[1,2,3])
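Selecting a scattering parameter definition explicitly (the file name is
illustrative only)
>>> n = rf.Network('ntwk1.s2p', s_def='pseudo')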
See Also
-----------
from_z : init from impedance values
read : read a network from a file
write : write a network to a file, using pickle
write_touchstone : write a network to a touchstone file
'''
# allow for old kwarg for backward compatibility
if 'touchstone_filename' in kwargs:
file = kwargs['touchstone_filename']
self.name = name
self.comments = comments
self.port_names = None
self.deembed = None
self.noise = None
self.noise_freq = None
if s_def not in S_DEFINITIONS:
raise ValueError('s_def parameter should be either:', S_DEFINITIONS)
else:
self.s_def = s_def
if file is not None:
# allows user to pass filename or file obj
# open file in 'binary' mode because we are going to try and
# unpickle it first
fid = get_fid(file, 'rb')
try:
self.read(fid)
except UnicodeDecodeError: # Support for pickles created in Python2 and loaded in Python3
self.read(fid, encoding='latin1')
except UnpicklingError:
# if unpickling doesn't work then, close fid, reopen in
# non-binary mode and try to read it as touchstone
filename = fid.name
fid.close()
self.read_touchstone(filename)
if name is None and isinstance(file, str):
name = os.path.splitext(os.path.basename(file))[0]
if self.frequency is not None and f_unit is not None:
self.frequency.unit = f_unit
# allow properties to be set through the constructor
for attr in PRIMARY_PROPERTIES + ['frequency', 'z0', 'f', 'noise', 'noise_freq']:
if attr in kwargs:
self.__setattr__(attr, kwargs[attr])
# self.nports = self.number_of_ports
@classmethod
def from_z(cls, z, *args, **kw):
'''
Create a Network from its Z-parameters
Parameters
------------
z : Numpy array
Impedance matrix. Should be of shape fxnxn,
where f is frequency axis and n is number of ports
\*\*kwargs :
key word arguments can be used to assign properties of the
Network, `f` and `z0`.
Returns
--------
ntw : :class:`Network`
Created Network
Example
--------
>>> f = rf.Frequency(start=1, stop=2, npoints=4) # 4 frequency points
>>> z = np.random.rand(len(f),2,2) + np.random.rand(len(f),2,2)*1j # 2-port z-matrix: shape=(4,2,2)
>>> ntw = rf.Network.from_z(z, f=f)
'''
s = npy.zeros(shape=z.shape)
me = cls(s=s, *args, **kw)
me.z = z
return me
# OPERATORS
def __pow__(self, other):
"""
cascade this network with another network
See `cascade`
"""
# if they pass a number then use power operator
if isinstance(other, Number):
out = self.copy()
out.s = out.s ** other
return out
else:
return cascade(self, other)
def __floordiv__(self, other):
"""
de-embed 1 or 2 networks from this network
Parameters
-----------
other : skrf.Network, or a list/tuple of one or two skrf.Network
    Network(s) to de-embed
Returns
--------
skrf.Network
    De-embedded network
See Also
----------
inv : inverse s-parameters
"""
if isinstance(other, (list, tuple)):
if len(other) >= 3:
warnings.warn(
"Number of networks greater than 2. Truncating!",
RuntimeWarning
)
other = other[:2]
else:
other = (other, )
for o in other:
if o.number_of_ports != 2:
raise IndexError('Incorrect number of ports in network {}.'.format(o.name))
if len(other) == 1:
# if passed 1 network (A) and another network B
# e.g. A // B
# e.g. A // (B)
# then de-embed like B.inv * A
b = other[0]
result = self.copy()
result.s = (b.inv ** self).s
# de_embed(self.s, b.s)
return result
else:
# if passed 1 network (A) and a list/tuple of 2 networks (B, C),
# e.g. A // (B, C)
# e.g. A // [B, C]
# then de-embed like B.inv * A * C.inv
b = other[0]
c = other[1]
result = self.copy()
result.s = (b.inv ** self ** c.inv).s
# flip(de_embed(flip(de_embed(c.s, self.s)), b.s))
return result
def __mul__(self, other):
"""
Element-wise complex multiplication of s-matrix
"""
result = self.copy()
if isinstance(other, Network):
self.__compatable_for_scalar_operation_test(other)
result.s = self.s * other.s
else:
# other may be an array or a number
result.s = self.s * npy.array(other).reshape(-1, self.nports, self.nports)
return result
def __rmul__(self, other):
"""
Element-wise complex multiplication of s-matrix
"""
result = self.copy()
if isinstance(other, Network):
self.__compatable_for_scalar_operation_test(other)
result.s = self.s * other.s
else:
# other may be an array or a number
result.s = self.s * npy.array(other).reshape(-1, self.nports, self.nports)
return result
def __add__(self, other):
"""
Element-wise complex addition of s-matrix
"""
result = self.copy()
if isinstance(other, Network):
self.__compatable_for_scalar_operation_test(other)
result.s = self.s + other.s
else:
# other may be an array or a number
result.s = self.s + npy.array(other).reshape(-1, self.nports, self.nports)
return result
def __radd__(self, other):
"""
Element-wise complex addition of s-matrix
"""
result = self.copy()
if isinstance(other, Network):
self.__compatable_for_scalar_operation_test(other)
result.s = self.s + other.s
else:
# other may be an array or a number
result.s = self.s + npy.array(other).reshape(-1, self.nports, self.nports)
return result
def __sub__(self, other):
"""
Element-wise complex subtraction of s-matrix
"""
result = self.copy()
if isinstance(other, Network):
self.__compatable_for_scalar_operation_test(other)
result.s = self.s - other.s
else:
# other may be an array or a number
result.s = self.s - npy.array(other).reshape(-1, self.nports, self.nports)
return result
def __rsub__(self, other):
"""
Element-wise complex subtraction of s-matrix
"""
result = self.copy()
if isinstance(other, Network):
self.__compatable_for_scalar_operation_test(other)
result.s = other.s - self.s
else:
# other may be an array or a number
result.s = npy.array(other).reshape(-1, self.nports, self.nports) - self.s
return result
def __truediv__(self, other):
return self.__div__(other)
def __div__(self, other):
"""
Element-wise complex division of s-matrix
"""
result = self.copy()
if isinstance(other, Network):
self.__compatable_for_scalar_operation_test(other)
result.s = self.s / other.s
else:
# other may be an array or a number
result.s = self.s / npy.array(other).reshape(-1, self.nports, self.nports)
return result
def __eq__(self, other):
if other is None:
return False
if npy.all(npy.abs(self.s - other.s) < ZERO):
return True
else:
return False
def __ne__(self, other):
return (not self.__eq__(other))
def __getitem__(self, key):
"""
Slices a Network object based on an index, or human readable string
Parameters
-----------
key : str, or slice
if a slice, e.g. [2:10], it is interpreted as indices of
the frequency axis.
if str, then should be like '50.1-75.5ghz', or just '50'.
If the frequency unit is omitted then self.frequency.unit is
used. This will also accept a 2 or 3 dimensional index of the
forms:
port1, port2
key, port1, port2
where port1 and port2 are allowed to be string port names if
the network has them defined (Network.port_names)
If port1 and port2 are integers, will return the single-port
network based on matrix notation (indices start at 1, not 0)
Returns
-----------
skrf.Network :
interpolated in frequency if single dimension provided
OR
1-port network if multi-dimensional index provided
Examples
-----------
>>> from skrf.data import ring_slot
>>> a = ring_slot['80-90ghz']
>>> a.plot_s_db()
Multidimensional indexing:
>>> import skrf as rf
>>> b = rf.Network("sometouchstonefile.s2p")
>>> c = b['80mhz', 'first_port_name', 'second_port_name']
>>> d = b['first_port_name', 'second_port_name']
Equivalently:
>>> d = b[1,2]
Equivalent to:
>>> d = b.s12
"""
a = self.z0 # HACK: to force getter for z0 to re-shape it (z0 getter has side effects...)
# If user passes a multidimensional index, try to return that 1 port subnetwork
if type(key) == tuple:
if len(key) == 3:
slice_like, p1_name, p2_name = key
return self[slice_like][p1_name, p2_name]
elif len(key) == 2:
p1_name, p2_name = key
if type(p1_name) == int and type(p2_name) == int: # allow integer indexing if desired
if p1_name <= 0 or p2_name <= 0 or p1_name > self.nports or p2_name > self.nports:
raise ValueError("Port index out of bounds")
p1_index = p1_name - 1
p2_index = p2_name - 1
else:
if self.port_names is None:
raise ValueError("Can't index without named ports")
try:
p1_index = self.port_names.index(p1_name)
except ValueError as e:
raise KeyError("Unknown port {0}".format(p1_name))
try:
p2_index = self.port_names.index(p2_name)
except ValueError as e:
raise KeyError("Unknown port {0}".format(p2_name))
ntwk = self.copy()
ntwk.s = self.s[:, p1_index, p2_index]
ntwk.z0 = self.z0[:, p1_index]
ntwk.name = "{0}({1}, {2})".format(self.name, p1_name, p2_name)
ntwk.port_names = None
return ntwk
else:
raise ValueError("Don't understand index: {0}".format(key))
if isinstance(key, str):
sliced_frequency = self.frequency[key]
return self.interpolate(sliced_frequency)
if isinstance(key, Frequency):
return self.interpolate(key)
# The following avoids interpolation when the slice is done directly with indices
ntwk = self.copy_subset(key)
return ntwk
def __str__(self):
"""
"""
f = self.frequency
if self.name is None:
name = ''
else:
name = self.name
if len(npy.shape(self.z0)) == 0 or npy.shape(self.z0)[0] == 0:
z0 = str(self.z0)
else:
z0 = str(self.z0[0, :])
output = '%i-Port Network: \'%s\', %s, z0=%s' % (self.number_of_ports, name, str(f), z0)
return output
def __repr__(self):
return self.__str__()
def __len__(self):
"""
length of frequency axis
"""
return len(self.s)
# INTERNAL CODE GENERATION METHODS
def __compatable_for_scalar_operation_test(self, other):
"""
tests to make sure other network's s-matrix is of same shape
"""
if other.frequency != self.frequency:
raise IndexError('Networks must have same frequency. See `Network.interpolate`')
if other.s.shape != self.s.shape:
raise IndexError('Networks must have same number of ports.')
def __generate_secondary_properties(self):
"""
creates numerous `secondary properties` which are various
scalar projections of the primary properties. The primary
properties are s, z, and y.
"""
for prop_name in PRIMARY_PROPERTIES:
for func_name in COMPONENT_FUNC_DICT:
func = COMPONENT_FUNC_DICT[func_name]
if 'gd' in func_name: # scaling of gradient by frequency
def fget(self, f=func, p=prop_name):
return f(getattr(self, p)) / (2 * npy.pi * self.frequency.step)
else:
def fget(self, f=func, p=prop_name):
return f(getattr(self, p))
doc = """
The %s component of the %s-matrix
See Also
----------
%s
""" % (func_name, prop_name, prop_name)
setattr(self.__class__, '%s_%s' % (prop_name, func_name), \
property(fget, doc=doc))
def __generate_subnetworks(self):
"""
generates all one-port sub-networks
"""
for m in range(self.number_of_ports):
for n in range(self.number_of_ports):
def fget(self, m=m, n=n):
ntwk = self.copy()
ntwk.s = self.s[:, m, n]
ntwk.z0 = self.z0[:, m]
return ntwk
doc = '''
one-port sub-network.
'''
setattr(self.__class__, 's%i%i' % (m + 1, n + 1), \
property(fget, doc=doc))
# PRIMARY PROPERTIES
@property
def s(self):
"""
Scattering parameter matrix.
The s-matrix [#]_ is a 3-dimensional :class:`numpy.ndarray` which has shape
`fxnxn`, where `f` is frequency axis and `n` is number of ports.
Note that indexing starts at 0, so s11 can be accessed by
taking the slice s[:,0,0].
Returns
---------
s : complex :class:`numpy.ndarray` of shape `fxnxn`
the scattering parameter matrix.
See Also
------------
s
y
z
t
a
References
------------
.. [#] http://en.wikipedia.org/wiki/Scattering_parameters
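Examples
----------
A short sketch (assumes `ntwk` is an existing 2-port Network):
>>> s21 = ntwk.s[:, 1, 0]      # S21 over the whole frequency axis
>>> ntwk.s[:, 0, 0] = 0        # overwrite S11 with a perfect match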
"""
return self._s
@s.setter
def s(self, s):
"""
the input s-matrix should be of shape fxnxn,
where f is frequency axis and n is number of ports
"""
s_shape = npy.shape(s)
if len(s_shape) < 3:
if len(s_shape) == 2:
# reshape to kx1x1, this simplifies indexing in function
s = npy.reshape(s, (-1, s_shape[0], s_shape[0]))
else:
s = npy.reshape(s, (-1, 1, 1))
self._s = npy.array(s, dtype=complex)
self.__generate_secondary_properties()
self.__generate_subnetworks()
@property
def h(self):
"""
Hybrid parameter matrix.
The h-matrix [#]_ is a 3-dimensional :class:`numpy.ndarray` which has shape
`fxnxn`, where `f` is frequency axis and `n` is number of ports.
Note that indexing starts at 0, so h11 can be accessed by
taking the slice `h[:,0,0]`.
Returns
---------
h : complex :class:`numpy.ndarray` of shape `fxnxn`
the hybrid parameter matrix.
See Also
------------
s
y
z
t
a
h
References
------------
.. [#] http://en.wikipedia.org/wiki/Two-port_network#Hybrid_parameters_(h-parameters)
"""
return s2h(self.s, self.z0)
@h.setter
def h(self, value):
self._s = h2s(value, self.z0)
@property
def y(self):
"""
Admittance parameter matrix.
The y-matrix [#]_ is a 3-dimensional :class:`numpy.ndarray` which has shape
`fxnxn`, where `f` is frequency axis and `n` is number of ports.
Note that indexing starts at 0, so y11 can be accessed by
taking the slice `y[:,0,0]`.
Returns
---------
y : complex :class:`numpy.ndarray` of shape `fxnxn`
the admittance parameter matrix.
See Also
------------
s
y
z
t
a
References
------------
.. [#] http://en.wikipedia.org/wiki/Admittance_parameters
"""
return s2y(self._s, self.z0, s_def=self.s_def)
@y.setter
def y(self, value):
self._s = y2s(value, self.z0, s_def=self.s_def)
@property
def z(self):
"""
Impedance parameter matrix.
The z-matrix [#]_ is a 3-dimensional :class:`numpy.ndarray` which has shape
`fxnxn`, where `f` is frequency axis and `n` is number of ports.
Note that indexing starts at 0, so z11 can be accessed by
taking the slice `z[:,0,0]`.
Returns
---------
z : complex :class:`numpy.ndarray` of shape `fxnxn`
the Impedance parameter matrix.
See Also
------------
s
y
z
t
a
References
------------
.. [#] http://en.wikipedia.org/wiki/impedance_parameters
"""
return s2z(self._s, self.z0, s_def=self.s_def)
@z.setter
def z(self, value):
self._s = z2s(value, self.z0, s_def=self.s_def)
@property
def t(self):
"""
Scattering transfer parameters
The t-matrix [#]_ is a 3-dimensional :class:`numpy.ndarray`
which has shape `fx2x2`, where `f` is frequency axis.
Note that indexing starts at 0, so t11 can be accessed by
taking the slice `t[:,0,0]`.
The t-matrix, also known as the wave cascading matrix, is
only defined for a 2-port Network.
Returns
--------
t : complex numpy.ndarry of shape `fx2x2`
t-parameters, aka scattering transfer parameters
See Also
------------
s
y
z
t
a
References
-----------
.. [#] http://en.wikipedia.org/wiki/Scattering_parameters#Scattering_transfer_parameters
"""
return s2t(self.s)
@property
def s_invert(self):
"""
Inverted scattering parameter matrix.
Inverted scattering parameters are simply inverted s-parameters,
defined element-wise as 1/s. Useful in the analysis of active networks.
The s_invert matrix is a 3-dimensional :class:`numpy.ndarray` which has shape
`fxnxn`, where `f` is the frequency axis and `n` is the number of ports.
Note that indexing starts at 0, so the port-1/port-1 element can be accessed by
taking the slice s_invert[:,0,0].
Returns
---------
s_inv : complex :class:`numpy.ndarray` of shape `fxnxn`
the inverted scattering parameter matrix.
See Also
------------
s
y
z
t
a
"""
return 1 / self.s
@s_invert.setter
def s_invert(self, value):
raise NotImplementedError
@property
def a(self):
"""
abcd parameter matrix. Used to cascade two-ports
The abcd-matrix [#]_ is a 3-dimensional :class:`numpy.ndarray` which has shape
`fxnxn`, where `f` is frequency axis and `n` is number of ports.
Note that indexing starts at 0, so abcd11 can be accessed by
taking the slice `abcd[:,0,0]`.
Returns
---------
abcd : complex :class:`numpy.ndarray` of shape `fxnxn`
the ABCD parameter matrix.
See Also
------------
s
y
z
t
a
abcd
References
------------
.. [#] http://en.wikipedia.org/wiki/impedance_parameters
"""
return s2a(self.s, self.z0)
@a.setter
def a(self, value):
self._s = a2s(value, self.z0)
@property
def z0(self):
"""
Characteristic impedance[s] of the network ports.
This property stores the characteristic impedance of each port
of the network. Because it is possible that each port has
a different characteristic impedance each varying with
frequency, `z0` is stored internally as a `fxn` array.
However, because `z0` is frequently simple (like 50 ohm), it can
also be set with just a number.
Returns
--------
z0 : :class:`numpy.ndarray` of shape fxn
characteristic impedance for network
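Examples
--------
A sketch of the accepted shapes (assumes `ntwk` is an existing 2-port Network):
>>> ntwk.z0 = 50                                # scalar, broadcast to shape fxn
>>> ntwk.z0 = [50, 75]                          # one impedance per port
>>> ntwk.z0 = npy.ones((len(ntwk), 2)) * 50.    # full fxn array (npy = numpy)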
"""
# This getter is awkward by design: it allows the user to
# set z0 before the s-parameters are set. However, in that
# case we don't know how to re-shape z0 to fxn. To solve this,
# the re-shaping is attempted when z0 is accessed, not when
# it is set. This is what makes this function confusing.
try:
if len(npy.shape(self._z0)) == 0:
try:
# try and re-shape z0 to match s
self._z0 = self._z0 * npy.ones(self.s.shape[:-1])
except(AttributeError):
print('Warning: Network has improper \'z0\' shape.')
# they have yet to set s .
elif len(npy.shape(self._z0)) == 1:
try:
if len(self._z0) == self.frequency.npoints:
# this z0 is frequency dependent but not port dependent
self._z0 = \
npy.repeat(npy.reshape(self._z0, (-1, 1)), self.number_of_ports, 1)
elif len(self._z0) == self.number_of_ports:
# this z0 is port dependent but not frequency dependent
self._z0 = self._z0 * npy.ones( \
(self.frequency.npoints, self.number_of_ports))
else:
raise (IndexError('z0 has bad shape'))
except AttributeError:
# there is no self.frequency, or self.number_of_ports
raise AttributeError('Cannot reshape z0 by inspection. You must provide a correctly '
'shaped z0, or set the s-matrix first.')
return self._z0
except AttributeError:
# print('Warning: z0 is undefined. Defaulting to 50.')
self.z0 = 50
return self.z0 # this is not an error, its a recursive call
@z0.setter
def z0(self, z0):
"""z0=npy.array(z0)
if len(z0.shape) < 2:
try:
#try and re-shape z0 to match s
z0=z0*npy.ones(self.s.shape[:-1])
except(AttributeError):
print ('Warning: you should store a Network\'s \'s\' matrix before its \'z0\'')
#they have yet to set s .
pass
"""
self._z0 = npy.array(z0, dtype=complex)
@property
def frequency(self):
"""
frequency information for the network.
This property is a :class:`~skrf.frequency.Frequency` object.
It holds the frequency vector, as well frequency unit, and
provides other properties related to frequency information, such
as start, stop, etc.
Returns
--------
frequency : :class:`~skrf.frequency.Frequency` object
frequency information for the network.
See Also
---------
f : property holding frequency vector in Hz
change_frequency : updates frequency property, and
interpolates s-parameters if needed
interpolate : interpolate function based on new frequency
info
"""
try:
return self._frequency
except (AttributeError):
self._frequency = Frequency(0, 0, 0)
return self._frequency
@frequency.setter
def frequency(self, new_frequency):
"""
takes a Frequency object, see frequency.py
"""
if isinstance(new_frequency, Frequency):
self._frequency = new_frequency.copy()
else:
try:
self._frequency = Frequency.from_f(new_frequency)
except (TypeError):
raise TypeError('Could not convert argument to a frequency vector')
@property
def inv(self):
"""
a :class:`Network` object with 'inverse' s-parameters.
This is used for de-embedding.
It is defined such that the inverse of the s-matrix cascaded with itself is a unity scattering transfer parameter (T) matrix.
Returns
---------
inv : a :class:`Network` object
a :class:`Network` object with 'inverse' s-parameters.
See Also
----------
inv : function which implements the inverse s-matrix
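Examples
----------
De-embedding sketch (assumes `fixture` and `measured` are hypothetical 2-port Networks):
>>> dut = fixture.inv ** measured    # remove the fixture from the measurement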
"""
if self.number_of_ports < 2:
raise (TypeError('One-Port Networks don\'t have inverses'))
out = self.copy()
out.s = inv(self.s)
out.deembed = True
return out
@property
def f(self):
"""
the frequency vector for the network, in Hz.
Returns
--------
f : :class:`numpy.ndarray`
frequency vector in Hz
See Also
---------
frequency : frequency property that holds all frequency
information
"""
return self.frequency.f
@f.setter
def f(self, f):
tmpUnit = self.frequency.unit
self.frequency = Frequency.from_f(f, unit=tmpUnit)
@property
def noisy(self):
"""
whether this network has noise
"""
try:
return self.noise is not None and self.noise_freq is not None
except:
return False
@property
def n(self):
"""
the ABCD form of the noise correlation matrix for the network
"""
if not self.noisy:
raise ValueError('network does not have noise')
if self.noise_freq.f.size > 1 :
noise_real = interp1d(self.noise_freq.f, self.noise.real, axis=0, kind=Network.noise_interp_kind)
noise_imag = interp1d(self.noise_freq.f, self.noise.imag, axis=0, kind=Network.noise_interp_kind)
return noise_real(self.frequency.f) + 1.0j * noise_imag(self.frequency.f)
else :
noise_real = self.noise.real
noise_imag = self.noise.imag
return noise_real + 1.0j * noise_imag
@property
def f_noise(self):
"""
the frequency vector for the noise of the network, in Hz.
"""
if not self.noisy:
raise ValueError('network does not have noise')
return self.noise_freq
@property
def y_opt(self):
"""
the optimum source admittance to minimize noise
"""
noise = self.n
return (npy.sqrt(noise[:,1,1]/noise[:,0,0] - npy.square(npy.imag(noise[:,0,1]/noise[:,0,0])))
+ 1.j*npy.imag(noise[:,0,1]/noise[:,0,0]))
@property
def z_opt(self):
"""
the optimum source impedance to minimize noise
"""
return 1./self.y_opt
@property
def g_opt(self):
"""
the optimum source reflection coefficient to minimize noise
"""
return z2s(self.z_opt.reshape((self.f.shape[0], 1, 1)), self.z0[:,0])[:,0,0]
@property
def nfmin(self):
"""
the minimum noise figure for the network
"""
noise = self.n
return npy.real(1. + (noise[:,0,1] + noise[:,0,0] * npy.conj(self.y_opt))/(2*K_BOLTZMANN*T0))
@property
def nfmin_db(self):
"""
the minimum noise figure for the network in dB
"""
return mf.complex_2_db10(self.nfmin)
def nf(self, z):
"""
the noise figure for the network if the source impedance is z
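A quick sketch (assumes `ntwk` is a 2-port with noise data, e.g. read from a
.s2p file containing a noise section):
>>> F = ntwk.nf(50.)               # noise figure (linear) for a 50 ohm source
>>> NF_db = 10 * npy.log10(F)      # convert to dB (npy = numpy)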
"""
z0 = self.z0
y_opt = self.y_opt
fmin = self.nfmin
rn = self.rn
ys = 1./z
gs = npy.real(ys)
return fmin + rn/gs * npy.square(npy.absolute(ys - y_opt))
def nfdb_gs(self, gs):
"""
return dB(NF) for each gamma_source x noise_frequency
"""
g = self.copy().s11
nfreq = self.noise_freq.npoints
if isinstance(gs, (int, float, complex)) :
g.s[:,0,0] = gs
nfdb = 10.*npy.log10(self.nf( g.z[:,0,0]))
elif isinstance(gs, npy.ndarray) :
npt = gs.shape[0]
z = self.z0[0,0] * (1+gs)/(1-gs)
zf = npy.broadcast_to(z[:,None], tuple((npt, nfreq)))
nfdb = 10.*npy.log10(self.nf( zf))
else :
g.s[:,0,0] = -1
nfdb = 10.*npy.log10(self.nf( g.z[:,0,0]))
return nfdb
@property
def rn(self):
"""
the equivalent noise resistance for the network
"""
return npy.real(self.n[:,0,0]/(4.*K_BOLTZMANN*T0))
# SECONDARY PROPERTIES
@property
def number_of_ports(self):
"""
the number of ports the network has.
Returns
--------
number_of_ports : number
the number of ports the network has.
"""
try:
return self.s.shape[1]
except (AttributeError):
return 0
@property
def nports(self):
"""
the number of ports the network has.
Returns
--------
number_of_ports : number
the number of ports the network has.
"""
return self.number_of_ports
@property
def port_tuples(self):
"""
Returns a list of tuples, for each port index pair
A convenience function for the common task of iterating over
all s-parameter index pairs
This just calls:
`[(y,x) for x in range(self.nports) for y in range(self.nports)]`
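For example, to loop over every (m, n) pair of a network (sketch, assumes `ntwk` exists):
>>> for m, n in ntwk.port_tuples:
...     print(m, n, abs(ntwk.s[:, m, n]).max())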
"""
return [(y, x) for x in range(self.nports) for y in range(self.nports)]
@property
def passivity(self):
"""
passivity metric for a multi-port network.
This returns a matrix whose diagonal elements are equal to the total
power received at all ports, normalized to the power at a single
excitation port.
mathematically, this is a test for unitary-ness of the
s-parameter matrix [#]_.
for two port this is
.. math::
( |S_{11}|^2 + |S_{21}|^2 \, , \, |S_{22}|^2+|S_{12}|^2)
in general it is
.. math::
S^H \\cdot S
where :math:`H` is conjugate transpose of S, and :math:`\\cdot`
is dot product.
Returns
---------
passivity : :class:`numpy.ndarray` of shape fxnxn
References
------------
.. [#] http://en.wikipedia.org/wiki/Scattering_parameters#Lossless_networks
"""
return passivity(self.s)
@property
def reciprocity(self):
"""
reciprocity metric for a multi-port network.
This returns the difference between the s-parameter matrix
and its transpose.
for two port this is
.. math::
S - S^T
where :math:`T` is transpose of S
Returns
---------
reciprocity : :class:`numpy.ndarray` of shape fxnxn
"""
return reciprocity(self.s)
@property
def reciprocity2(self):
"""
Reciprocity metric #2
.. math::
abs(1 - S/S^T )
for the two port case, this evaluates to the distance of the
determinant of the wave-cascading matrix from unity.
"""
return abs(1 - self.s / self.s.swapaxes(1, 2))
@property
def stability(self):
"""
Stability factor
.. math::
K = ( 1 - |S_{11}|^2 - |S_{22}|^2 + |D|^2 ) / (2 * |S_{12}| * |S_{21}|)
D = S_{11} S_{22} - S_{12} S_{21}
Returns
---------
"""
assert self.nports == 2, "Stability factor K is only defined for two ports"
D = self.s[:, 0, 0] * self.s[:, 1, 1] - self.s[:, 0, 1] * self.s[:, 1, 0]
K = (1 - npy.abs(self.s[:, 0, 0]) ** 2 - npy.abs(self.s[:, 1, 1]) ** 2 + npy.abs(D) ** 2) / (
2 * npy.abs(self.s[:, 0, 1]) * npy.abs(self.s[:, 1, 0]))
return K
@property
def group_delay(self):
"""
The group delay
Usually used as a measure of dispersion (or distortion).
Defined as the derivative of the unwrapped s-parameter phase
(in rad) with respect to the frequency.
-d(self.s_rad_unwrap)/d(self.frequency.w)
https://en.wikipedia.org/wiki/Group_delay_and_phase_delay
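Example (a sketch, assumes `ntwk` is an existing 2-port Network):
>>> gd = ntwk.group_delay          # shape fxnxn, in seconds
>>> gd21 = gd[:, 1, 0]             # group delay of S21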
"""
gd = self.s * 0 # quick way to make a new array of correct shape
phi = self.s_rad_unwrap
dw = self.frequency.dw
for m, n in self.port_tuples:
dphi = gradient(phi[:, m, n])
gd[:, m, n] = -dphi / dw
return gd
## NETWORK CLASSIFIERs
def is_reciprocal(self, tol=mf.ALMOST_ZERO):
'''
test for reciprocity
'''
return npy.allclose(reciprocity(self.s), npy.zeros_like(self.s), atol=tol)
def is_symmetric(self, n=1, port_order={}, tol=mf.ALMOST_ZERO):
'''
Returns whether the 2N-port network has n-th order reflection symmetry
by checking s_ii == s_jj for appropriate pair(s) of i and j.
https://en.wikipedia.org/wiki/Two-port_network#Scattering_parameters_(S-parameters)
Parameters
----------
n : int
Order of line symmetry to test for
port_order : dict[int, int]
Renumbering of zero-indexed ports before testing
tol: float
Tolerance in numeric comparisons
Raises
------
ValueError
(1) If the network has an odd number of ports
(2) If n is not in the range 1 to N
(3) If n does not evenly divide 2N
(4) If port_order is not a valid reindexing of ports
e.g. specifying x->y but not y->z, specifying x->y twice,
or using an index outside the range 0 to 2N-1
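Examples
--------
A sketch for a 2-port (N = 1), which checks S11 == S22 within `tol`:
>>> ntwk.is_symmetric()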
'''
z, y, x = self.s.shape # z is number of frequencies, and x is number of ports (2N)
if x % 2 != 0 or x != y:
raise ValueError('test for symmetry is only valid for a 2N-port network')
N = x // 2
if n <= 0 or n > N:
raise ValueError('specified order n = ' + str(n) + ' must be ' +
'between 1 and N = ' + str(N) + ', inclusive')
if x % n != 0:
raise ValueError('specified order n = ' + str(n) + ' must evenly divide ' +
'2N = ' + str(x))
from_ports = list(map(lambda key: int(key), port_order.keys()))
to_ports = list(map(lambda val: int(val), port_order.values()))
test_network = self.copy() # TODO: consider defining renumbered()
if len(from_ports) > 0 and len(to_ports) > 0:
test_network.renumber(from_ports, to_ports)
mat = npy.matrix(test_network.s)
offs = npy.array(range(0, N)) # port index offsets from each mirror line
for k in range(0, N, N // n): # iterate through n mirror lines
mirror = k*npy.ones_like(offs)
i, j = mirror-1 - offs, mirror + offs
if not npy.allclose(mat[i, i], mat[j, j], atol=tol):
return False
return True
def is_passive(self, tol=mf.ALMOST_ZERO):
'''
test for passivity
'''
try:
M = npy.square(self.passivity)
except ValueError:
return False
I = npy.identity(M.shape[-1])
for f_idx in range(len(M)):
D = I - M[f_idx, :, :] # dissipation matrix
if not mf.is_positive_definite(D) \
and not mf.is_positive_semidefinite(mat=D, tol=tol):
return False
return True
def is_lossless(self, tol=mf.ALMOST_ZERO):
'''
test for losslessness
[S] is lossless iff [S] is unitary ([S][S]* = [1])
https://en.wikipedia.org/wiki/Unitary_matrix
'''
for f_idx in range(len(self.s)):
mat = npy.matrix(self.s[f_idx, :, :])
if not mf.is_unitary(mat, tol=tol):
return False
return True
## CLASS METHODS
def copy(self):
'''
Returns a copy of this Network
Needed to allow pass-by-value for a Network instead of
pass-by-reference
'''
ntwk = Network(s=self.s,
frequency=self.frequency.copy(),
z0=self.z0, s_def=self.s_def
)
ntwk.name = self.name
if self.noise is not None and self.noise_freq is not None:
ntwk.noise = self.noise.copy()
ntwk.noise_freq = self.noise_freq.copy()
try:
ntwk.port_names = copy(self.port_names)
except(AttributeError):
ntwk.port_names = None
return ntwk
def copy_from(self, other):
'''
Copies the contents of another Network into self
Uses copy, so that the data is passed-by-value, not reference
Parameters
-----------
other : Network
the network to copy the contents of
Examples
-----------
>>> a = rf.N()
>>> b = rf.N('my_file.s2p')
>>> a.copy_from (b)
'''
for attr in ['_s', 'frequency', '_z0', 'name']:
self.__setattr__(attr, copy(other.__getattribute__(attr)))
def copy_subset(self, key):
'''
Returns a copy of a frequency subset of this Network
Needed to allow pass-by-value for a subset Network instead of
pass-by-reference
Parameters
-----------
key : numpy array
the array indices of the frequencies to take
'''
ntwk = Network(s=self.s[key,:],
frequency=self.frequency[key].copy(),
z0=self.z0[key,:],
)
if isinstance(self.name, str):
ntwk.name = self.name + '_subset'
else:
ntwk.name = self.name
if self.noise is not None and self.noise_freq is not None:
ntwk.noise = npy.copy(self.noise[key,:])
ntwk.noise_freq = npy.copy(self.noise_freq[key])
try:
ntwk.port_names = copy(self.port_names)
except(AttributeError):
ntwk.port_names = None
return ntwk
def set_noise_a(self, noise_freq=None, nfmin_db=0, gamma_opt=0, rn=1 ) :
'''
sets the "A" (ie cascade) representation of the correlation matrix, based on the
noise frequency and input parameters.
'''
sh_fr = noise_freq.f.shape
nfmin_db = npy.broadcast_to(npy.atleast_1d(nfmin_db), sh_fr)
gamma_opt = npy.broadcast_to(npy.atleast_1d(gamma_opt), sh_fr)
rn = npy.broadcast_to(npy.atleast_1d(rn), sh_fr)
nf_min = npy.power(10., nfmin_db/10.)
# TODO maybe interpolate z0 as above
y_opt = 1./(self.z0[0, 0] * (1. + gamma_opt)/(1. - gamma_opt))
noise = 4.*K_BOLTZMANN*T0*npy.array(
[[rn, (nf_min-1.)/2. - rn*npy.conj(y_opt)],
[(nf_min-1.)/2. - rn*y_opt, npy.square(npy.absolute(y_opt)) * rn]]
)
self.noise = noise.swapaxes(0, 2).swapaxes(1, 2)
self.noise_freq = noise_freq
# touchstone file IO
def read_touchstone(self, filename):
"""
loads values from a touchstone file.
The work of this function is done through the
:class:`~skrf.io.touchstone` class.
Parameters
----------
filename : str or file-object
touchstone file name.
Notes
------
only the scattering parameters format is supported at the
moment
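Examples
----------
A sketch with a hypothetical file name:
>>> ntwk = Network()
>>> ntwk.read_touchstone('horn_antenna.s1p')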
"""
from .io import touchstone
touchstoneFile = touchstone.Touchstone(filename)
if touchstoneFile.get_format().split()[1] != 's':
raise NotImplementedError('only s-parameters supported for now.')
self.comments = touchstoneFile.get_comments()
try:
self.variables = touchstoneFile.get_comment_variables()
except:
pass
self.port_names = touchstoneFile.port_names
# set z0 before s so that y and z can be computed
if touchstoneFile.is_from_hfss():
self.gamma, self.z0 = touchstoneFile.get_gamma_z0()
else:
self.z0 = complex(touchstoneFile.resistance)
f, self.s = touchstoneFile.get_sparameter_arrays() # note: freq in Hz
self.frequency = Frequency.from_f(f, unit='hz')
self.frequency.unit = touchstoneFile.frequency_unit
if touchstoneFile.noise is not None:
noise_freq = touchstoneFile.noise[:, 0] * touchstoneFile.frequency_mult
nfmin_db = touchstoneFile.noise[:, 1]
gamma_opt_mag = touchstoneFile.noise[:, 2]
gamma_opt_angle = npy.deg2rad(touchstoneFile.noise[:, 3])
# TODO maybe properly interpolate z0?
# it probably never actually changes
if touchstoneFile.version == '1.0':
rn = touchstoneFile.noise[:, 4] * self.z0[0, 0]
else:
rn = touchstoneFile.noise[:, 4]
gamma_opt = gamma_opt_mag * npy.exp(1j * gamma_opt_angle)
nf_min = npy.power(10., nfmin_db/10.)
# TODO maybe interpolate z0 as above
y_opt = 1./(self.z0[0, 0] * (1. + gamma_opt)/(1. - gamma_opt))
# use the voltage/current correlation matrix; this works nicely with
# cascading networks
self.noise_freq = Frequency.from_f(noise_freq, unit='hz')
self.noise_freq.unit = touchstoneFile.frequency_unit
self.set_noise_a(self.noise_freq, nfmin_db, gamma_opt, rn )
if self.name is None:
try:
self.name = os.path.basename(os.path.splitext(filename)[0])
# this may not work if filename is a file object
except(AttributeError, TypeError):
# in case they pass a file-object instead of file name,
# get the name from the touchstone file
try:
self.name = os.path.basename(os.path.splitext(touchstoneFile.filename)[0])
except Exception:
print('warning: couldn\'t inspect network name')
self.name = ''
pass
@classmethod
def zipped_touchstone(cls, filename, archive):
"""
read a Network from a touchstone file in a ziparchive
Parameters
----------
filename : str
the full path filename of the touchstone file
archive : zipfile.ZipFile
the opened zip archive
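Examples
----------
A sketch with a hypothetical archive and member name:
>>> import zipfile
>>> with zipfile.ZipFile('measurements.zip') as archive:
...     thru = Network.zipped_touchstone('cal/thru.s2p', archive)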
"""
with archive.open(filename) as touchstone_file:
ntwk = cls()
ntwk.read_touchstone(touchstone_file)
return ntwk
def write_touchstone(self, filename=None, dir=None,
write_z0=False, skrf_comment=True,
return_string=False, to_archive=None,
form='ri',format_spec_A='{}',format_spec_B='{}',
format_spec_freq='{}'):
"""
Write a contents of the :class:`Network` to a touchstone file.
Parameters
----------
filename : a string, optional
touchstone filename, without extension. if 'None', then
will use the network's :attr:`name`.
dir : string, optional
the directory to save the file in.
write_z0 : boolean
write impedance information into touchstone as comments,
like Ansoft HFSS does
skrf_comment : bool, optional
write `created by skrf` comment
return_string : bool, optional
return the file_string rather than write to a file
to_archive : zipfile.Zipfile
opened ZipFile object to place touchstone file in
form : 'db','ma','ri'
format to write data,
* db = db, deg
* ma = mag, deg
* ri = real, imag
format_spec_A : string, optional
Any valid format specifying string as given by https://docs.python.org/3/library/string.html#format-string-syntax
This specifies the formatting in the resulting touchstone file for the A part of the S parameter, (e.g. the dB magnitude for 'db' format, the linear
magnitude for 'ma' format, or the real part for 'ri' format)
format_spec_B : string, optional
Any valid format specifying string as given by https://docs.python.org/3/library/string.html#format-string-syntax
This specifies the formatting in the resulting touchstone file for the B part of the S parameter, (e.g. the angle in degrees for 'db' format,
the angle in degrees for 'ma' format, or the imaginary part for 'ri' format)
format_spec_freq : string, optional
Any valid format specifying string as given by https://docs.python.org/3/library/string.html#format-string-syntax
This specifies the formatting in the resulting touchstone file for the frequency.
Notes
-------
formats supported at the moment are,
[Hz/kHz/MHz/GHz] S [DB/MA/RI]
Frequency unit can be changed by setting Network.frequency.unit property
The functionality of this function should take place in the
:class:`~skrf.touchstone.touchstone` class.
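Examples
--------
A sketch, assuming `ntwk` is a 2-port whose `name` is 'dut':
>>> ntwk.write_touchstone()                                  # writes 'dut.s2p' (RI format)
>>> s = ntwk.write_touchstone(return_string=True, form='db')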
"""
# according to Touchstone 2.0 spec
# [no tab, max. 4 coeffs per line, etc.]
if filename is None:
if self.name is not None:
filename = self.name
else:
raise ValueError('No filename given. Network must have a name, or you must provide a filename')
if get_extn(filename) is None:
filename = filename + '.s%ip' % self.number_of_ports
if dir is not None:
filename = os.path.join(dir, filename)
# set internal variables according to form
form = form.upper()
if form == "RI":
formatDic = {"labelA": "Re", "labelB": "Im"}
funcA = npy.real
funcB = npy.imag
elif form == "DB":
formatDic = {"labelA": "dB", "labelB": "ang"}
funcA = mf.complex_2_db
funcB = mf.complex_2_degree
elif form == "MA":
formatDic = {"labelA": "mag", "labelB": "ang"}
funcA = mf.complex_2_magnitude
funcB = mf.complex_2_degree
else:
raise ValueError('`form` must be either `db`,`ma`,`ri`')
# add formatting to funcA and funcB so we don't have to write it out many many times.
def c2str_A(c):
'''Function which takes a complex number for the A part of param and returns an appropriately formatted string'''
return format_spec_A.format(funcA(c))
def c2str_B(c):
'''Function which takes a complex number for B part of param and returns an appropriately formatted string'''
return format_spec_B.format(funcB(c))
def get_buffer():
if return_string is True or type(to_archive) is zipfile.ZipFile:
from .io.general import StringBuffer # avoid circular import
buf = StringBuffer()
else:
buf = open(filename, "w")
return buf
with get_buffer() as output:
# Add '!' Touchstone comment delimiters to the start of every line in self.comments
commented_header = ''
try:
if self.comments:
for comment_line in self.comments.split('\n'):
commented_header += '!{}\n'.format(comment_line)
except(AttributeError):
pass
if skrf_comment:
commented_header += '!Created with skrf (http://scikit-rf.org).\n'
output.write(commented_header)
# write header file.
# the '#' line is NOT a comment it is essential and it must be
# exactly this format, to work
# [HZ/KHZ/MHZ/GHZ] [S/Y/Z/G/H] [MA/DB/RI] [R n]
output.write('# {} S {} R {} \n'.format(self.frequency.unit, form, str(abs(self.z0[0, 0]))))
scaled_freq = self.frequency.f_scaled
if self.number_of_ports == 1:
# write comment line for users (optional)
output.write('!freq {labelA}S11 {labelB}S11\n'.format(**formatDic))
# write out data
for f in range(len(self.f)):
output.write(format_spec_freq.format(scaled_freq[f]) + ' ' \
+ c2str_A(self.s[f, 0, 0]) + ' ' \
+ c2str_B(self.s[f, 0, 0]) + '\n')
# write out the z0 following hfss's convention if desired
if write_z0:
output.write('! Port Impedance ')
for n in range(self.number_of_ports):
output.write('%.14f %.14f ' % (self.z0[f, n].real, self.z0[f, n].imag))
output.write('\n')
elif self.number_of_ports == 2:
# 2-port is a special case with
# - single line, and
# - S21,S12 in reverse order: legacy ?
# write comment line for users (optional)
output.write(
'!freq {labelA}S11 {labelB}S11 {labelA}S21 {labelB}S21 {labelA}S12 {labelB}S12 {labelA}S22 {labelB}S22\n'.format(
**formatDic))
# write out data
for f in range(len(self.f)):
output.write(format_spec_freq.format(scaled_freq[f]) + ' ' \
+ c2str_A(self.s[f, 0, 0]) + ' ' \
+ c2str_B(self.s[f, 0, 0]) + ' ' \
+ c2str_A(self.s[f, 1, 0]) + ' ' \
+ c2str_B(self.s[f, 1, 0]) + ' ' \
+ c2str_A(self.s[f, 0, 1]) + ' ' \
+ c2str_B(self.s[f, 0, 1]) + ' ' \
+ c2str_A(self.s[f, 1, 1]) + ' ' \
+ c2str_B(self.s[f, 1, 1]) + '\n')
# write out the z0 following hfss's convention if desired
if write_z0:
output.write('! Port Impedance')
for n in range(2):
output.write(' %.14f %.14f' % (self.z0[f, n].real, self.z0[f, n].imag))
output.write('\n')
elif self.number_of_ports == 3:
# 3-port is written over 3 lines / matrix order
# write comment line for users (optional)
output.write('!freq')
for m in range(1, 4):
for n in range(1, 4):
output.write(" {labelA}S{m}{n} {labelB}S{m}{n}".format(m=m, n=n, **formatDic))
output.write('\n!')
output.write('\n')
# write out data
for f in range(len(self.f)):
output.write(format_spec_freq.format(scaled_freq[f]))
for m in range(3):
for n in range(3):
output.write(' ' + c2str_A(self.s[f, m, n]) + ' ' \
+ c2str_B(self.s[f, m, n]))
output.write('\n')
# write out the z0 following hfss's convention if desired
if write_z0:
output.write('! Port Impedance')
for n in range(3):
output.write(' %.14f %.14f' % (self.z0[f, n].real, self.z0[f, n].imag))
output.write('\n')
elif self.number_of_ports >= 4:
# general n-port
# - matrix is written line by line
# - 4 complex numbers / 8 real numbers max. for a single line
# - continuation lines (anything except first) go with indent
# this is not part of the spec, but many tools handle it this way
# -> allows to parse without knowledge of number of ports
# write comment line for users (optional)
output.write('!freq')
for m in range(1, 1 + self.number_of_ports):
for n in range(1, 1 + self.number_of_ports):
if (n > 0 and (n % 4) == 0):
output.write('\n!')
output.write(" {labelA}S{m}{n} {labelB}S{m}{n}".format(m=m, n=n, **formatDic))
output.write('\n!')
output.write('\n')
# write out data
for f in range(len(self.f)):
output.write(format_spec_freq.format(scaled_freq[f]))
for m in range(self.number_of_ports):
for n in range(self.number_of_ports):
if (n > 0 and (n % 4) == 0):
output.write('\n')
output.write(' ' + c2str_A(self.s[f, m, n]) + ' ' \
+ c2str_B(self.s[f, m, n]))
output.write('\n')
# write out the z0 following hfss's convention if desired
if write_z0:
output.write('! Port Impedance')
for n in range(self.number_of_ports):
output.write(' %.14f %.14f' % (self.z0[f, n].real, self.z0[f, n].imag))
output.write('\n')
if type(to_archive) is zipfile.ZipFile:
to_archive.writestr(filename, output.getvalue())
elif return_string is True:
return output.getvalue()
def write(self, file=None, *args, **kwargs):
"""
Write the Network to disk using the :mod:`pickle` module.
The resultant file can be read either by using the Networks
constructor, :func:`__init__` , the read method :func:`read`, or
the general read function :func:`skrf.io.general.read`
Parameters
-----------
file : str or file-object
filename or a file-object. If left as None then the
filename will be set to Network.name, if it's not None.
If both are None, ValueError is raised.
\*args, \*\*kwargs :
passed through to :func:`~skrf.io.general.write`
Notes
------
If `self.name` is not None, `file` can be left as None,
and the resultant file will have the `.ntwk` extension appended
to the filename.
Examples
---------
>>> n = rf.N(f=[1,2,3],s=[1,1,1],z0=50, name = 'open')
>>> n.write()
>>> n2 = rf.read('open.ntwk')
See Also
---------
skrf.io.general.write : write any skrf object
skrf.io.general.read : read any skrf object
"""
# this import is delayed until here because of a circular dependency
from .io.general import write
if file is None:
if self.name is None:
raise (ValueError('No filename given. You must provide a filename, or set the name attribute'))
file = self.name
write(file, self, *args, **kwargs)
def read(self, *args, **kwargs):
"""
Read a Network from a 'ntwk' file
A ntwk file is written with :func:`write`. It is just a pickled
file.
Parameters
-------------
\*args, \*\*kwargs : args and kwargs
passed to :func:`skrf.io.general.write`
Notes
------
This function calls :func:`skrf.io.general.read`.
Examples
-----------
>>> rf.read('myfile.ntwk')
>>> rf.read('myfile.p')
See Also
----------
write
skrf.io.general.write
skrf.io.general.read
"""
from .io.general import read
self.copy_from(read(*args, **kwargs))
def write_spreadsheet(self, *args, **kwargs):
'''
Write contents of network to a spreadsheet, for your boss to use.
See Also
---------
skrf.io.general.network_2_spreadsheet
'''
from .io.general import network_2_spreadsheet
network_2_spreadsheet(self, *args, **kwargs)
def to_dataframe(self, *args, **kwargs):
"""
Convert attributes of a Network to a pandas DataFrame
See Also
---------
skrf.io.general.network_2_dataframe
"""
from .io.general import network_2_dataframe
return network_2_dataframe(self, *args, **kwargs)
def write_to_json_string(self):
"""
Serialize and convert network to a JSON string.
This is ~3x faster than writing to and reading back from touchstone for a 4-port, 20,000-point device.
See Also
---------
skrf.io.general.to_json_string
"""
from .io.general import to_json_string
return to_json_string(self)
# interpolation
def interpolate(self, freq_or_n, basis='s', coords='cart',
f_kwargs={}, return_array=False, **kwargs):
"""
Interpolate a Network along the frequency axis
The input `freq_or_n` can be either a new
:class:`~skrf.frequency.Frequency`, an `int`, or a new
frequency vector (in Hz).
This interpolates a given `basis`, ie s, z, y, etc, in the
coordinate system defined by `coord` like polar or cartesian.
Different interpolation types ('linear', 'quadratic') can be used
by passing appropriate `\*\*kwargs`. This function `returns` an
interpolated Network. Alternatively :func:`~Network.interpolate_self`
will interpolate self.
Parameters
-----------
freq_or_n : :class:`~skrf.frequency.Frequency` or int or listlike
The new frequency over which to interpolate. this arg may be
one of the following
* a new `Frequency` object
* if an int, the current frequency span is resampled linearly.
* if a listlike, it is used to create a new frequency
object using `Frequency.from_f`
basis : ['s','z','y','a'],etc
The network parameter to interpolate
coords : ['cart','polar']
coordinate system to use for interpolation.
* 'cart' is cartesian (Re/Im)
* 'polar' is unwrapped phase/mag
return_array: bool
return the interpolated array instead of re-assigning it to
a given attribute
**kwargs : keyword arguments
passed to :func:`scipy.interpolate.interp1d` initializer.
`kind` controls interpolation type.
`kind` = `rational` uses interpolation by rational polynomials.
`d` kwarg controls the degree of rational polynomials
when `kind`=`rational`. Defaults to 4.
Returns
----------
result : :class:`Network`
an interpolated Network, or array
Notes
--------
The interpolation coordinate system (`coords`) makes a big
difference for large amounts of interpolation. Polar works well
for DUTs with slowly changing magnitude. Try them all.
See :func:`scipy.interpolate.interpolate.interp1d` for useful
kwargs. For example
**kind** : str or int
Specifies the kind of interpolation as a string ('linear',
'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or
as an integer specifying the order of the spline
interpolator to use.
See Also
----------
resample
interpolate_self
interpolate_from_f
Examples
-----------
.. ipython::
@suppress
In [21]: import skrf as rf
In [21]: n = rf.data.ring_slot
In [21]: n
In [21]: new_freq = rf.Frequency(75,110,501,'ghz')
In [21]: n.interpolate(new_freq, kind = 'cubic')
"""
# make new network and fill with interpolated values
result = self.copy()
if kwargs.get('kind', None) == 'rational':
f_interp = mf.rational_interp
#Not supported by rational_interp
del kwargs['kind']
else:
f_interp = interp1d
# interpret input
if isinstance(freq_or_n, Frequency):
# input is a frequency object
new_frequency = freq_or_n
else:
dim = len(shape(freq_or_n))
if dim == 0:
# input is a number
n = int(freq_or_n)
new_frequency = self.frequency.copy()
new_frequency.npoints = n
elif dim == 1:
# input is a array, or list
new_frequency = Frequency.from_f(freq_or_n, **f_kwargs)
# set new frequency and pull some variables
result.frequency = new_frequency
f = self.frequency.f
f_new = new_frequency.f
# interpolate z0 ( this must happen first, because its needed
# to compute the basis transform below (like y2s), if basis!='s')
interp_z0_re = f_interp(f, self.z0.real, axis=0, **kwargs)
interp_z0_im = f_interp(f, self.z0.imag, axis=0, **kwargs)
result.z0 = interp_z0_re(f_new) + 1j * interp_z0_im(f_new)
# interpolate parameter for a given basis
x = self.__getattribute__(basis)
if coords == 'cart':
interp_re = f_interp(f, x.real, axis=0, **kwargs)
interp_im = f_interp(f, x.imag, axis=0, **kwargs)
x_new = interp_re(f_new) + 1j * interp_im(f_new)
elif coords == 'polar':
rad = npy.unwrap(npy.angle(x), axis=0)
mag = npy.abs(x)
interp_rad = f_interp(f, rad, axis=0, **kwargs)
interp_mag = f_interp(f, mag, axis=0, **kwargs)
x_new = interp_mag(f_new) * npy.exp(1j * interp_rad(f_new))
# interpolate noise data too
if self.noisy:
f_noise = self.noise_freq.f
f_noise_new = new_frequency.f
interp_noise_re = f_interp(f_noise, self.noise.real, axis=0, **kwargs)
interp_noise_im = f_interp(f_noise, self.noise.imag, axis=0, **kwargs)
noise_new = interp_noise_re(f_noise_new) + 1j * interp_noise_im(f_noise_new)
if return_array:
return x_new
else:
result.__setattr__(basis, x_new)
if self.noisy:
result.noise = noise_new
result.noise_freq = new_frequency
return result
def interpolate_self_npoints(self, npoints, **kwargs):
'''
Interpolate network based on a new number of frequency points
Parameters
-----------
npoints : int
number of frequency points
**kwargs : keyword arguments
passed to :func:`scipy.interpolate.interp1d` initializer.
See Also
---------
interpolate_self : same functionality but takes a Frequency
object
interpolate : same functionality but takes a Frequency
object and returns a new Network, instead of updating
itself.
Notes
-------
The function :func:`~Network.resample` is an alias for
:func:`~Network.interpolate_self_npoints`.
Examples
-----------
.. ipython::
@suppress
In [21]: import skrf as rf
In [21]: n = rf.data.ring_slot
In [21]: n
In [21]: n.resample(501) # resample is an alias
In [21]: n
'''
warnings.warn('Use interpolate_self', DeprecationWarning)
new_frequency = self.frequency.copy()
new_frequency.npoints = npoints
self.interpolate_self(new_frequency, **kwargs)
def interpolate_self(self, freq_or_n, **kwargs):
'''
Interpolates s-parameters given a new
:class:`~skrf.frequency.Frequency` object.
See :func:`~Network.interpolate` for more information.
Parameters
-----------
freq_or_n : :class:`~skrf.frequency.Frequency` or int or listlike
frequency information to interpolate at
**kwargs : keyword arguments
passed to :func:`scipy.interpolate.interp1d` initializer.
See Also
----------
resample
interpolate
interpolate_from_f
'''
ntwk = self.interpolate(freq_or_n, **kwargs)
self.frequency, self.s, self.z0 = ntwk.frequency, ntwk.s, ntwk.z0
if self.noisy:
self.noise, self.noise_freq = ntwk.noise, ntwk.noise_freq
##convenience
resample = interpolate_self
def interpolate_from_f(self, f, interp_kwargs={}, **kwargs):
'''
Interpolates s-parameters from a frequency vector.
Given a frequency vector, and optionally a `unit` (see \*\*kwargs)
, interpolate the networks s-parameters linearly in real and
imaginary components.
See :func:`~Network.interpolate` for more information.
Parameters
-----------
new_frequency : :class:`~skrf.frequency.Frequency`
frequency information to interpolate at
interp_kwargs :
dictionary of kwargs to be passed through to
:func:`scipy.interpolate.interpolate.interp1d`
\*\*kwargs :
passed to :func:`scipy.interpolate.interp1d` initializer.
Notes
---------
This creates a new :class:`~skrf.frequency.Frequency`, object
using the method :func:`~skrf.frequency.Frequency.from_f`, and then calls
:func:`~Network.interpolate_self`.
See Also
----------
resample
interpolate
interpolate_self
'''
warnings.warn('Use interpolate', DeprecationWarning)
return self.interpolate(freq_or_n=f, f_kwargs=kwargs,
**interp_kwargs)
# freq = Frequency.from_f(f,**kwargs)
# self.interpolate_self(freq, **interp_kwargs)
def extrapolate_to_dc(self, points=None, dc_sparam=None, kind='rational',
coords='cart', **kwargs):
"""
Extrapolate S-parameters down to 0 Hz and interpolate to uniform spacing.
If the frequency vector needs to be interpolated, aliasing will occur in
the time domain. For best results, the first frequency point should be a
multiple of the frequency step so that points from DC to
the first measured point can be added without interpolating the rest of the
frequency points.
Parameters
-----------
points : int or None
Number of frequency points to be used in interpolation.
If None, the number of points is calculated based on the frequency step size
and the spacing between 0 Hz and the first measured frequency point.
dc_sparam : :class:`numpy.ndarray` or None
NxN S-parameter matrix at 0 Hz.
If None, the S-parameters at 0 Hz are determined by linear extrapolation.
kind : str or int
Specifies the kind of interpolation as a string ('linear',
'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or
as an integer specifying the order of the spline
interpolator to use for `scipy.interp1d`.
`kind` = 'rational' uses interpolation by rational polynomials.
`d` kwarg controls the degree of rational polynomials
when `kind` is 'rational'. Defaults to 4.
coords : ['cart','polar']
coordinate system to use for interpolation.
* 'cart' is cartesian (Re/Im)
* 'polar' is unwrapped phase/mag
Passed to :func:`Network.interpolate`
Returns
-----------
result : :class:`Network`
Extrapolated Network
See Also
----------
interpolate
impulse_response
step_response
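Examples
----------
A sketch of a typical time-domain workflow (assumes `ntwk` was measured on a
uniform frequency grid starting above DC):
>>> dc_ntwk = ntwk.extrapolate_to_dc(kind='linear')   # first point is now 0 Hz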
"""
result = self.copy()
if self.frequency.f[0] == 0:
return result
if points is None:
fstep = self.frequency.f[1] - self.frequency.f[0]
points = len(self) + int(round(self.frequency.f[0]/fstep))
if dc_sparam is None:
#Interpolate DC point alone first using linear interpolation, because
#interp1d can't extrapolate with other methods.
#TODO: Option to enforce passivity
x = result.s[:2]
f = result.frequency.f[:2]
rad = npy.unwrap(npy.angle(x), axis=0)
mag = npy.abs(x)
interp_rad = interp1d(f, rad, axis=0, fill_value='extrapolate')
interp_mag = interp1d(f, mag, axis=0, fill_value='extrapolate')
dc_sparam = interp_mag(0) * npy.exp(1j * interp_rad(0))
else:
#Make numpy array if argument was list
dc_sparam = npy.array(dc_sparam)
result.s = npy.insert(result.s, 0, dc_sparam, axis=0)
result.frequency.f = npy.insert(result.frequency.f, 0, 0)
result.z0 = npy.insert(result.z0, 0, result.z0[0], axis=0)
new_f = Frequency(0, result.frequency.f_scaled[-1], points,
unit=result.frequency.unit)
#None of the default interpolation methods are good enough here
#and they cause aliasing in the time domain.
#Best results are obtained when no interpolation is needed,
#e.g. the first frequency point is a multiple of the frequency step.
result.interpolate_self(new_f, kind=kind, coords=coords, **kwargs)
#DC value must have zero imaginary part
result.s[0,:,:] = result.s[0,:,:].real
return result
def crop(self, f_start, f_stop,unit =None):
'''
Crop Network based on start and stop frequencies.
No interpolation is done.
Parameters
-----------
f_start : number
start frequency of crop range, in units of self.frequency.unit.
If `f_start` is lower than the lowest frequency, no change to the network is made by the lower bound.
f_stop : number
stop frequency of crop range, in units of self.frequency.unit
If `f_stop` is higher than the highest frequency, no change to the network is made by the higher bound.
unit : string
Units that `f_start` and `f_stop` are described in. This must be a string recognized by the Frequency
class, e.g. 'Hz','MHz', etc. A value of `None` assumes units are same as `self`
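Examples
----------
A sketch, assuming `ntwk` spans 1-10 GHz with `frequency.unit` of 'ghz':
>>> ntwk.crop(2, 8)                          # in-place crop to 2-8 GHz
>>> sub = ntwk.cropped(3e9, 6e9, unit='hz')  # cropped copy, bounds given in Hz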
'''
if f_start is None:
f_start = -npy.inf
if f_stop is None:
f_stop = npy.inf
if f_stop<f_start:
raise ValueError("`f_stop` was {}, which was smaller than `f_start`, which was {}".format(f_stop,f_start))
if unit is not None: # if `unit` is specified, we must retranslate the frequency units
scaleFactor = Frequency.multiplier_dict[unit.lower()]/self.frequency.multiplier# make a multiplier to put f_start and f_stop in the right units, e.g. 'GHz' -> 'MHz'
f_start *=scaleFactor
f_stop *=scaleFactor
if f_start > self.frequency.f_scaled.max():
raise ValueError("`f_start` was {}, which was larger than the largest frequency in this Network object, which was {}".format(f_start,self.frequency.f_scaled.max()))
if f_stop < self.frequency.f_scaled.min():
raise ValueError("`f_stop` was {}, which was smaller than the smallest frequency in this Network object, which was {}".format(f_stop,self.frequency.f_scaled.min()))
start_idx,stop_idx = 0,self.frequency.npoints-1 # start with entire frequency range selected
if f_start > self.frequency.f_scaled.min():
start_idx = find_nearest_index(self.frequency.f_scaled, f_start)
if f_start > self.frequency.f_scaled[start_idx]: # we do not want the start index to be at a frequency lower than `f_start`
start_idx += 1
if f_stop < self.frequency.f_scaled.max():
stop_idx = find_nearest_index(self.frequency.f_scaled, f_stop)
if f_stop < self.frequency.f_scaled[stop_idx]: # we don't want the stop index to be at a frequency higher than `f_stop`
stop_idx -=1
if stop_idx < start_idx :
raise ValueError("Stop index/frequency lower than start: stop_idx: {}, start_idx: {}, self.frequency.f[stop_idx]: {}, self.frequency.f[start_idx]: {}"\
.format(stop_idx,start_idx,self.frequency.f[stop_idx],self.frequency.f[start_idx] ))
ntwk = self[start_idx:stop_idx + 1]
self.frequency, self.s, self.z0 = ntwk.frequency, ntwk.s, ntwk.z0
def cropped(self, f_start, f_stop, unit=None):
'''
returns a cropped network, leaves self alone.
See Also
---------
crop
'''
out = self.copy()
out.crop(f_start=f_start, f_stop=f_stop,unit=unit)
return out
def flip(self):
'''
swaps the ports of a 2n-port Network
in case the network is a 2n-port and n > 1, the 'second' numbering scheme is
assumed to be consistent with the ** cascade operator.
::
-|0 n|- 0-|n 0|-n
-|1 n+1|- flip 1-|n+1 1|-n+1
... ... => ... ...
-|n-1 2n-1|- n-1-|2n-1 n-1|-2n-1
'''
if self.number_of_ports % 2 == 0:
n = int(self.number_of_ports / 2)
old = list(range(0, 2*n))
new = list(range(n, 2*n)) + list(range(0, n))
self.renumber(old, new)
else:
raise ValueError('you can only flip Networks with an even number of ports')
def flipped(self):
'''
returns a flipped network, leaves self alone.
See Also
---------
flip
'''
out = self.copy()
out.flip()
return out
def renormalize(self, z_new, s_def=S_DEF_DEFAULT):
'''
Renormalize the s-parameter matrix given new port impedances
Parameters
---------------
z_new : complex array of shape FxN, F, N or a scalar
new port impedances
s_def : str, can be 'power', 'pseudo' or 'traveling'
Scattering parameter definition : 'power' for power-waves definition,
'pseudo' for pseudo-waves definition.
'traveling' corresponds to the initial implementation.
Default is 'power'.
NB: results are the same for real-valued characteristic impedances.
See Also
----------
renormalize_s
fix_z0_shape
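Examples
----------
A sketch, assuming `ntwk` is an existing 2-port referenced to 50 ohm:
>>> ntwk.renormalize(75)          # all ports re-referenced to 75 ohm
>>> ntwk.renormalize([50, 75])    # per-port impedances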
'''
self.s = renormalize_s(self.s, self.z0, z_new, s_def)
self.z0 = fix_z0_shape(z_new, self.frequency.npoints, self.nports)
def renumber(self, from_ports, to_ports):
'''
renumbers ports of a Network
Parameters
-----------
from_ports : list-like
to_ports: list-like
Examples
---------
To flip the ports of a 2-port network 'foo':
>>> foo.renumber( [0,1], [1,0] )
To rotate the ports of a 3-port network 'bar' so that port 0 becomes port 1:
>>> bar.renumber( [0,1,2], [1,2,0] )
To swap the first and last ports of a network 'duck':
>>> duck.renumber( [0,-1], [-1,0] )
'''
from_ports = npy.array(from_ports)
to_ports = npy.array(to_ports)
if len(npy.unique(from_ports)) != len(from_ports):
raise ValueError('an index can appear at most once in from_ports or to_ports')
if any(npy.unique(from_ports) != npy.unique(to_ports)):
raise ValueError('from_ports and to_ports must have the same set of indices')
self.s[:, to_ports, :] = self.s[:, from_ports, :] # renumber rows
self.s[:, :, to_ports] = self.s[:, :, from_ports] # renumber columns
self.z0[:, to_ports] = self.z0[:, from_ports]
def rotate(self, theta, unit='deg'):
'''
Rotate S-parameters
'''
if unit == 'deg':
theta = mf.degree_2_radian(theta )
self.s = self.s * npy.exp(-1j*theta)
def delay(self, d, unit='deg', port=0, media=None,**kw):
'''
Add phase delay to a given port.
This will cascade a matched line of length `d/2` from a given `media`
in front of `port`. If `media==None`, then freespace is used.
Parameters
----------
d : number
the length of the transmission line (see the unit argument)
unit : ['deg','rad','m','cm','um','in','mil','s','us','ns','ps']
the units of d. See :func:`Media.to_meters`, for details
port : int
port to add delay to.
media: skrf.media.Media
media object to use for generating delay. If None, this will
default to freespace.
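Examples
----------
A sketch, assuming `ntwk` is an existing 2-port Network:
>>> delayed = ntwk.delay(90, 'deg', port=1)   # add 90 degrees of delay at port 1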
'''
if d ==0:
return self
d=d/2.
if self.nports >2:
raise NotImplementedError('only implemented for 1 and 2 ports')
if media is None:
from .media import Freespace
media = Freespace(frequency=self.frequency,z0=self.z0[:,port])
l =media.line(d=d, unit=unit,**kw)
return l**self
def windowed(self, window=('kaiser', 6), normalize=True, center_to_dc=None):
'''
Return a windowed version of s-matrix. Used in time-domain analysis.
When using time domain through :attr:`s_time_db`,
or similar properies, the spectrum is usually windowed,
before the IFFT is taken. This is done to
compensate for the band-pass nature of a spectrum [1]_ .
This function calls :func:`scipy.signal.get_window` which gives
more details about the windowing.
Parameters
-----------
window : string, float, or tuple
The type of window to create. See :func:`scipy.signal.get_window`
for details.
normalize : bool
Normalize the window to preserve power. ie
sum(ntwk.s,axis=0) == sum(ntwk.windowed().s,axis=0)
center_to_dc : bool or None
If True only the positive half of the window is applied to the signal.
This should be used if frequency vector begins from DC or from "close enough" to DC.
If False full window is used which also attenuates low frequencies.
If None then value is determined automatically based on if frequency
vector begins from 0.
Examples
-----------
>>> ntwk = rf.Network('myfile.s2p')
>>> ntwk_w = ntwk.windowed()
>>> ntwk_w.plot_s_time_db()
References
-------------
.. [1] Agilent Time Domain Analysis Using a Network Analyzer Application Note 1287-12
'''
if center_to_dc is None:
center_to_dc = self.frequency.f[0] == 0
if center_to_dc:
window = signal.get_window(window, 2*len(self))[len(self):]
else:
window = signal.get_window(window, len(self))
window = window.reshape(-1, 1, 1) * npy.ones((len(self),
self.nports,
self.nports))
windowed = self * window
if normalize:
# normalize the s-parameters to account for power lost in windowing
windowed.s = windowed.s * npy.sum(self.s_mag, axis=0) / \
npy.sum(windowed.s_mag, axis=0)
return windowed
def time_gate(self, *args, **kw):
'''
time gate this ntwk
see `skrf.time_domain.time_gate`
'''
return time_gate(self, *args, **kw)
# noise
def add_noise_polar(self, mag_dev, phase_dev, **kwargs):
'''
adds complex zero-mean Gaussian white noise.
Noise of the given standard deviations is added to the magnitude
and phase of the s-parameters.
Parameters
------------
mag_dev : number
standard deviation of magnitude
phase_dev : number
standard deviation of phase [in degrees]
'''
phase_rv = stats.norm(loc=0, scale=phase_dev).rvs(size=self.s.shape)
mag_rv = stats.norm(loc=0, scale=mag_dev).rvs(size=self.s.shape)
phase = (self.s_deg + phase_rv)
mag = self.s_mag + mag_rv
self.s = mag * npy.exp(1j * npy.pi / 180. * phase)
def add_noise_polar_flatband(self, mag_dev, phase_dev, **kwargs):
'''
adds a flatband complex zero-mean gaussian white-noise signal of
given standard deviations for magnitude and phase
Parameters
------------
mag_dev : number
standard deviation of magnitude
phase_dev : number
standard deviation of phase [in degrees]
'''
phase_rv = stats.norm(loc=0, scale=phase_dev).rvs(size=self.s[0].shape)
mag_rv = stats.norm(loc=0, scale=mag_dev).rvs(size=self.s[0].shape)
phase = (self.s_deg + phase_rv)
mag = self.s_mag + mag_rv
self.s = mag * npy.exp(1j * npy.pi / 180. * phase)
def multiply_noise(self, mag_dev, phase_dev, **kwargs):
'''
multiplies the s-parameters by a complex bivariate Gaussian white-noise signal
of given standard deviations for magnitude and phase.
Magnitude mean is 1, phase mean is 0.
Parameters
------------
mag_dev : number
standard deviation of magnitude
phase_dev : number
standard deviation of phase [in degrees]
'''
phase_rv = stats.norm(loc=0, scale=phase_dev).rvs( \
size=self.s.shape)
mag_rv = stats.norm(loc=1, scale=mag_dev).rvs( \
size=self.s.shape)
self.s = mag_rv * npy.exp(1j * npy.pi / 180. * phase_rv) * self.s
def nudge(self, amount=1e-12):
'''
Perturb s-parameters by small amount.
This is useful to work-around numerical bugs.
Notes
-----------
This function simply does
self.s = self.s + amount
Parameters
------------
amount : number,
amount to add to s parameters
'''
self.s = self.s + amount
# other
def func_on_parameter(self, func, attr='s', *args, **kwargs):
'''
Applies a function to a parameter matrix, one frequency slice at a time
This is useful for functions that can only operate on 2d arrays,
like numpy.linalg.inv. This loops over f and calls
`func(ntwkA.s[f,:,:], *args, **kwargs)`
Parameters
------------
func : func
function to apply to the parameter matrix, on a single-frequency slice
(i.e. func(ntwkA.s[0,:,:], *args, **kwargs))
\*args, \*\*kwargs :
passed to the func
Examples
-----------
>>> from numpy.linalg import inv
>>> ntwk.func_on_parameter(inv)
'''
ntwkB = self.copy()
p = self.__getattribute__(attr)
ntwkB.s = npy.r_[[func(p[k, :, :], *args, **kwargs) \
for k in range(len(p))]]
return ntwkB
def nonreciprocity(self, m, n, normalize=False):
'''
Normalized non-reciprocity metric.
This is a port-by-port measure of how non-reciprocal an n-port
network is. It is defined by,
.. math::
(S_{mn} - S_{nm}) / \\sqrt ( S_{mn} S_{nm} )
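Example (a sketch, assumes `ntwk` is an existing 2-port Network):
>>> nr = ntwk.nonreciprocity(2, 1, normalize=True)   # compares S21 against S12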
'''
forward = self.__getattribute__('s%i%i' % (m, n))
reverse = self.__getattribute__('s%i%i' % (n, m))
if normalize:
denom = forward * reverse
denom.s = npy.sqrt(denom.s)
return (forward - reverse) / denom
else:
return (forward - reverse)
# generalized mixed mode transformations
# XXX: experimental implementation of gmm s parameters
# TODO: automated test cases
def se2gmm(self, p, z0_mm=None):
'''
Transform network from single ended parameters to generalized mixed mode parameters [1]
[1] Ferrero and Pirola; Generalized Mixed-Mode S-Parameters; IEEE Transactions on
Microwave Theory and Techniques; Vol. 54; No. 1; Jan 2006
Parameters
------------
p : int, number of differential ports
z0_mm : f x n matrix of mixed mode impedances, optional
if None, 100 Ohm differential and 25 Ohm common mode reference impedances are used
Odd Number of Ports
-------------------
In the case where there are an odd number of ports (such as a 3-port network
with ports 0, 1, and 2), se2gmm() assumes that the last port (port 2) remains
single-ended and ports 0 and 1 are converted to differential mode and common
mode, respectively. For networks in which the port ordering is not suitable,
port renumbering can be used.
For example, a 3-port single-ended network is converted to mixed-mode
parameters.
| Port 0 (single-ended, 50 ohms) --> Port 0 (single-ended, 50 ohms)
| Port 1 (single-ended, 50 ohms) --> Port 1 (differential mode, 100 ohms)
| Port 2 (single-ended, 50 ohms) --> Port 2 (common mode, 25 ohms)
>>> ntwk.renumber([0,1,2],[2,1,0])
>>> ntwk.se2gmm(p=1)
>>> ntwk.renumber([2,1,0],[0,1,2])
In the resulting network, port 0 is single-ended, port 1 is
differential mode, and port 2 is common mode.
.. warning::
This is not fully tested, and should be considered as experimental
'''
# XXX: assumes 'proper' order (first differential ports, then single ended ports)
if z0_mm is None:
z0_mm = self.z0.copy()
z0_mm[:, 0:p] = 100 # differential mode impedance
z0_mm[:, p:2 * p] = 25 # common mode impedance
Xi_tilde_11, Xi_tilde_12, Xi_tilde_21, Xi_tilde_22 = self._Xi_tilde(p, self.z0, z0_mm)
A = Xi_tilde_21 + npy.einsum('...ij,...jk->...ik', Xi_tilde_22, self.s)
B = Xi_tilde_11 + npy.einsum('...ij,...jk->...ik', Xi_tilde_12, self.s)
self.s = npy.transpose(npy.linalg.solve(npy.transpose(B, (0, 2, 1)).conj(), npy.transpose(A, (0, 2, 1)).conj()),
(0, 2, 1)).conj() # (34)
self.z0 = z0_mm
def gmm2se(self, p, z0_se=None):
'''
Transform network from generalized mixed mode parameters [1] to single ended parameters
[1] Ferrero and Pirola; Generalized Mixed-Mode S-Parameters; IEEE Transactions on
Microwave Theory and Techniques; Vol. 54; No. 1; Jan 2006
Parameters
------------
p : int, number of differential ports
z0_se : f x n matrix of single ended impedances, optional
if None, a 50 Ohm reference impedance is assumed on every port
.. warning::
This is not fully tested, and should be considered as experimental
'''
# TODO: testing of reverse transformation
# XXX: assumes 'proper' order (differential ports, single ended ports)
if z0_se is None:
z0_se = self.z0.copy()
z0_se[:] = 50
Xi_tilde_11, Xi_tilde_12, Xi_tilde_21, Xi_tilde_22 = self._Xi_tilde(p, z0_se, self.z0)
A = Xi_tilde_22 - npy.einsum('...ij,...jk->...ik', self.s, Xi_tilde_12)
B = Xi_tilde_21 - npy.einsum('...ij,...jk->...ik', self.s, Xi_tilde_11)
self.s = npy.linalg.solve(A, B) # (35)
self.z0 = z0_se
# generalized mixed mode supplement functions
_T = npy.array([[1, 0, -1, 0], [0, 0.5, 0, -0.5], [0.5, 0, 0.5, 0], [0, 1, 0, 1]]) # (5)
def _m(self, z0):
scaling = npy.sqrt(z0.real) / (2 * npy.abs(z0))
Z = npy.ones((z0.shape[0], 2, 2), dtype=npy.complex128)
Z[:, 0, 1] = z0
Z[:, 1, 1] = -z0
return scaling[:, npy.newaxis, npy.newaxis] * Z
def _M(self, j, k, z0_se): # (14)
M = npy.zeros((self.f.shape[0], 4, 4), dtype=npy.complex128)
M[:, :2, :2] = self._m(z0_se[:, j])
M[:, 2:, 2:] = self._m(z0_se[:, k])
return M
def _M_circle(self, l, p, z0_mm): # (12)
M = npy.zeros((self.f.shape[0], 4, 4), dtype=npy.complex128)
M[:, :2, :2] = self._m(z0_mm[:, l]) # differential mode impedance of port pair
M[:, 2:, 2:] = self._m(z0_mm[:, p + l]) # common mode impedance of port pair
return M
def _X(self, j, k, l, p, z0_se, z0_mm): # (15)
return npy.einsum('...ij,...jk->...ik', self._M_circle(l, p, z0_mm).dot(self._T),
npy.linalg.inv(self._M(j, k, z0_se))) # matrix multiplication elementwise for each frequency
def _P(self, p): # (27) (28)
n = self.nports
Pda = npy.zeros((p, 2 * n), dtype=bool)
Pdb = npy.zeros((p, 2 * n), dtype=bool)
Pca = npy.zeros((p, 2 * n), dtype=bool)
Pcb = npy.zeros((p, 2 * n), dtype=bool)
Pa = npy.zeros((n - 2 * p, 2 * n), dtype=bool)
Pb = npy.zeros((n - 2 * p, 2 * n), dtype=bool)
for l in npy.arange(p):
Pda[l, 4 * (l + 1) - 3 - 1] = True
Pca[l, 4 * (l + 1) - 1 - 1] = True
Pdb[l, 4 * (l + 1) - 2 - 1] = True
Pcb[l, 4 * (l + 1) - 1] = True
if Pa.shape[0] != 0:
Pa[l, 4 * p + 2 * (l + 1) - 1 - 1] = True
Pb[l, 4 * p + 2 * (l + 1) - 1] = True
return npy.concatenate((Pda, Pca, Pa, Pdb, Pcb, Pb))
def _Q(self): # (29) error corrected
n = self.nports
Qa = npy.zeros((n, 2 * n), dtype=bool)
Qb = npy.zeros((n, 2 * n), dtype=bool)
for l in npy.arange(n):
Qa[l, 2 * (l + 1) - 1 - 1] = True
Qb[l, 2 * (l + 1) - 1] = True
return npy.concatenate((Qa, Qb))
def _Xi(self, p, z0_se, z0_mm): # (24)
n = self.nports
Xi = npy.ones(self.f.shape[0])[:, npy.newaxis, npy.newaxis] * npy.eye(2 * n, dtype=npy.complex128)
for l in npy.arange(p):
Xi[:, 4 * l:4 * l + 4, 4 * l:4 * l + 4] = self._X(l * 2, l * 2 + 1, l, p, z0_se, z0_mm)
return Xi
def _Xi_tilde(self, p, z0_se, z0_mm): # (31)
n = self.nports
P = npy.ones(self.f.shape[0])[:, npy.newaxis, npy.newaxis] * self._P(p)
QT = npy.ones(self.f.shape[0])[:, npy.newaxis, npy.newaxis] * self._Q().T
Xi = self._Xi(p, z0_se, z0_mm)
Xi_tilde = npy.einsum('...ij,...jk->...ik', npy.einsum('...ij,...jk->...ik', P, Xi), QT)
return Xi_tilde[:, :n, :n], Xi_tilde[:, :n, n:], Xi_tilde[:, n:, :n], Xi_tilde[:, n:, n:]
def impulse_response(self, window='hamming', n=None, pad=1000, bandpass=None):
"""Calculates time-domain impulse response of one-port.
First frequency must be 0 Hz for the transformation to be accurate and
the frequency step must be uniform. Positions of the reflections are
accurate even if the frequency doesn't begin from 0, but shapes will
be distorted.
Real measurements should be extrapolated to DC and interpolated to
uniform frequency spacing.
Y-axis is the reflection coefficient.
Parameters
-----------
window : string
FFT windowing function.
n : int
Length of impulse response output.
If n is not specified, 2 * (m - 1) points are used,
where m = len(self) + pad
pad : int
Number of zeros to add as padding for FFT.
Adding more zeros improves accuracy of peaks.
bandpass : bool or None
If False, the window function is centered on 0 Hz.
If True, the full window is used and low frequencies are attenuated.
If None, the value is determined automatically based on whether the
frequency vector begins at 0.
Returns
---------
t : class:`numpy.ndarray`
Time vector
y : class:`numpy.ndarray`
Impulse response
See Also
-----------
step_response
extrapolate_to_dc
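Examples
---------
A minimal sketch (illustrative only; assumes `net` is a one-port
:class:`Network` measured over a uniform frequency grid):
>>> net_dc = net.extrapolate_to_dc()
>>> t, y = net_dc.s11.impulse_response(window='hamming', pad=1000)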
"""
if self.nports != 1:
raise ValueError('Only one-ports are supported')
if n is None:
# Use zero-padding specification. Note that this does not allow n odd.
n = 2 * (self.frequency.npoints + pad - 1)
fstep = self.frequency.step
if n % 2 == 0:
t = npy.flipud(npy.linspace(.5 / fstep, -.5 / fstep, n, endpoint=False))
else:
t = npy.flipud(npy.linspace(.5 / fstep, -.5 / fstep, n + 1, endpoint=False))
t = t[:-1]
if bandpass in (True, False):
center_to_dc = not bandpass
else:
center_to_dc = None
if window is not None:
w = self.windowed(window=window, normalize=False, center_to_dc=center_to_dc)
else:
w = self
return t, mf.irfft(w.s, n=n).flatten()
def step_response(self, window='hamming', n=None, pad=1000):
"""Calculates time-domain step response of one-port.
First frequency must be 0 Hz for the transformation to be accurate and
the frequency step must be uniform.
Real measurements should be extrapolated to DC and interpolated to
uniform frequency spacing.
Y-axis is the reflection coefficient.
Parameters
-----------
window : string
FFT windowing function.
n : int
Length of step response output.
If n is not specified, 2 * (m - 1) points are used,
where m = len(self) + pad
pad : int
Number of zeros to add as padding for FFT.
Adding more zeros improves accuracy of peaks.
Returns
---------
t : class:`numpy.ndarray`
Time vector
y : class:`numpy.ndarray`
Step response
See Also
-----------
impulse_response
extrapolate_to_dc
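Examples
---------
A minimal sketch (illustrative only; assumes `net` is a one-port
:class:`Network`, e.g. a TDR-style reflection measurement):
>>> net_dc = net.extrapolate_to_dc()
>>> t, y = net_dc.s11.step_response(window='hamming', pad=1000)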
"""
if self.nports != 1:
raise ValueError('Only one-ports are supported')
if self.frequency.f[0] != 0:
warnings.warn(
"Frequency doesn't begin from 0. Step response will not be correct.",
RuntimeWarning
)
if n is None:
# Use zero-padding specification. Note that this does not allow n odd.
n = 2 * (self.frequency.npoints + pad - 1)
fstep = self.frequency.step
if n % 2 == 0:
t = npy.flipud(npy.linspace(.5 / fstep, -.5 / fstep, n, endpoint=False))
else:
t = npy.flipud(npy.linspace(.5 / fstep, -.5 / fstep, n + 1, endpoint=False))
t = t[:-1]
if window is not None:
w = self.windowed(window=window, normalize=False, center_to_dc=True)
else:
w = self
return t, npy.cumsum(mf.irfft(w.s, n=n).flatten())
# Network Active s/z/y/vswr parameters
def s_active(self, a):
'''
Returns the active s-parameters of the network for a defined wave excitation a.
The active s-parameter at a port is the reflection coefficients
when other ports are excited. It is an important quantity for active
phased array antennas.
Active s-parameters are defined by [#]_:
.. math::
\mathrm{active(s)}_{m} = \sum_{i=1}^N s_{mi}\\frac{a_i}{a_m}
where :math:`s` are the scattering parameters and :math:`N` the number of ports
Parameters
----------
a : complex array of shape (n_ports)
forward wave complex amplitude (pseudowave formulation [#]_)
Returns
---------
s_act : complex array of shape (n_freqs, n_ports)
active S-parameters for the excitation a
See Also
-----------
z_active : active Z-parameters
y_active : active Y-parameters
vswr_active : active VSWR
References
----------
.. [#] D. M. Pozar, IEEE Trans. Antennas Propag. 42, 1176 (1994).
.. [#] D. Williams, IEEE Microw. Mag. 14, 38 (2013).
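Examples
---------
A minimal sketch (illustrative only; assumes `ntwk` is a 2-port
:class:`Network`; the excitation vector below is hypothetical):
>>> import numpy as npy
>>> a = npy.array([1, 1j])  # equal amplitudes, 90 degree phase offset
>>> s_act = ntwk.s_active(a)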
'''
return s2s_active(self.s, a)
def z_active(self, a):
'''
Returns the active Z-parameters of the network for a defined wave excitation a.
The active Z-parameters are defined by:
.. math::
\mathrm{active}(z)_{m} = z_{0,m} \\frac{1 + \mathrm{active}(s)_m}{1 - \mathrm{active}(s)_m}
where :math:`z_{0,m}` is the characteristic impedance and
:math:`\mathrm{active}(s)_m` the active S-parameter of port :math:`m`.
Parameters
----------
a : complex array of shape (n_ports)
forward wave complex amplitude
Returns
----------
z_act : complex array of shape (nfreqs, nports)
active Z-parameters for the excitation a
See Also
-----------
s_active : active S-parameters
y_active : active Y-parameters
vswr_active : active VSWR
'''
return s2z_active(self.s, self.z0, a)
def y_active(self, a):
'''
Returns the active Y-parameters of the network for a defined wave excitation a.
The active Y-parameters are defined by:
.. math::
\mathrm{active}(y)_{m} = y_{0,m} \\frac{1 - \mathrm{active}(s)_m}{1 + \mathrm{active}(s)_m}
where :math:`y_{0,m}` is the characteristic admittance and
:math:`\mathrm{active}(s)_m` the active S-parameter of port :math:`m`.
Parameters
----------
a : complex array of shape (n_ports)
forward wave complex amplitude
Returns
----------
y_act : complex array of shape (nfreqs, nports)
active Y-parameters for the excitation a
See Also
-----------
s_active : active S-parameters
z_active : active Z-parameters
vswr_active : active VSWR
'''
return s2y_active(self.s, self.z0, a)
def vswr_active(self, a):
'''
Returns the active VSWR of the network for a defined wave excitation a.
The active VSWR is defined by :
.. math::
\mathrm{active}(vswr)_{m} = \\frac{1 + |\mathrm{active}(s)_m|}{1 - |\mathrm{active}(s)_m|}
where :math:`\mathrm{active}(s)_m` the active S-parameter of port :math:`m`.
Parameters
----------
a : complex array of shape (n_ports)
forward wave complex amplitude
Returns
----------
vswr_act : complex array of shape (nfreqs, nports)
active VSWR for the excitation a
See Also
-----------
s_active : active S-parameters
z_active : active Z-parameters
y_active : active Y-parameters
'''
return s2vswr_active(self.s, a)
## Functions operating on Network[s]
def connect(ntwkA, k, ntwkB, l, num=1):
'''
connect two n-port networks together.
specifically, connect ports `k` thru `k+num-1` on `ntwkA` to ports
`l` thru `l+num-1` on `ntwkB`. The resultant network has
(ntwkA.nports+ntwkB.nports-2*num) ports. The port indices ('k','l')
start from 0. Port impedances **are** taken into account.
When the two networks have overlapping frequencies, the resulting
network will contain only the overlapping frequencies.
Parameters
-----------
ntwkA : :class:`Network`
network 'A'
k : int
starting port index on `ntwkA` ( port indices start from 0 )
ntwkB : :class:`Network`
network 'B'
l : int
starting port index on `ntwkB`
num : int
number of consecutive ports to connect (default 1)
Returns
---------
ntwkC : :class:`Network`
new network of rank (ntwkA.nports + ntwkB.nports - 2*num)
See Also
-----------
connect_s : actual S-parameter connection algorithm.
innerconnect_s : actual S-parameter connection algorithm.
Notes
-------
the effect of mis-matched port impedances is handled by inserting
a 2-port 'mismatch' network between the two connected ports.
This mismatch Network is calculated with the
:func:`impedance_mismatch` function.
Examples
---------
To implement a *cascade* of two networks
>>> ntwkA = rf.Network('ntwkA.s2p')
>>> ntwkB = rf.Network('ntwkB.s2p')
>>> ntwkC = rf.connect(ntwkA, 1, ntwkB,0)
'''
# some checking
try:
check_frequency_equal(ntwkA, ntwkB)
except IndexError as e:
common_freq = npy.intersect1d(ntwkA.frequency.f, ntwkB.frequency.f, return_indices=True)
if common_freq[0].size == 0:
raise e
else:
ntwkA = ntwkA[common_freq[1]]
ntwkB = ntwkB[common_freq[2]]
warnings.warn("Using a frequency subset:\n" + str(ntwkA.frequency))
if (k + num - 1 > ntwkA.nports - 1):
raise IndexError('Port `k` out of range')
if (l + num - 1 > ntwkB.nports - 1):
raise IndexError('Port `l` out of range')
# create output Network, from copy of input
ntwkC = ntwkA.copy()
# if networks' z0's are not identical, then connect an impedance
# mismatch, which takes into account the effect of differing port
# impedances.
if not assert_z0_at_ports_equal(ntwkA, k, ntwkB, l):
ntwkC.s = connect_s(
ntwkA.s, k,
impedance_mismatch(ntwkA.z0[:, k], ntwkB.z0[:, l]), 0)
# the connect_s() put the mismatch's output port at the end of
# ntwkC's ports. Fix the new port's impedance, then insert it
# at position k where it belongs.
ntwkC.z0[:, k:] = npy.hstack((ntwkC.z0[:, k + 1:], ntwkB.z0[:, [l]]))
ntwkC.renumber(from_ports=[ntwkC.nports - 1] + list(range(k, ntwkC.nports - 1)),
to_ports=list(range(k, ntwkC.nports)))
# call s-matrix connection function
ntwkC.s = connect_s(ntwkC.s, k, ntwkB.s, l)
# combine z0 arrays and remove ports which were `connected`
ntwkC.z0 = npy.hstack(
(npy.delete(ntwkA.z0, range(k, k + 1), 1), npy.delete(ntwkB.z0, range(l, l + 1), 1)))
# if we're connecting more than one port, call innerconnect recursively
# until all connections are made to finish the job
if num > 1:
ntwkC = innerconnect(ntwkC, k, ntwkA.nports - 1 + l, num - 1)
# if ntwkB is a 2port, then keep port indices where you expect.
if ntwkB.nports == 2 and ntwkA.nports > 2 and num == 1:
from_ports = list(range(ntwkC.nports))
to_ports = list(range(ntwkC.nports))
to_ports.pop(k-1)
to_ports.append(k-1)
ntwkC.renumber(from_ports=from_ports,
to_ports=to_ports)
# if ntwkA and ntwkB are both 2port, and either one has noise, calculate ntwkC's noise
either_are_noisy = False
try:
either_are_noisy = ntwkA.noisy or ntwkB.noisy
except AttributeError:
pass
if num == 1 and ntwkA.nports == 2 and ntwkB.nports == 2 and either_are_noisy:
if ntwkA.noise_freq is not None and ntwkB.noise_freq is not None and ntwkA.noise_freq != ntwkB.noise_freq:
raise IndexError('Networks must have same noise frequency. See `Network.interpolate`')
cA = ntwkA.noise
cB = ntwkB.noise
noise_freq = ntwkA.noise_freq
if noise_freq is None:
noise_freq = ntwkB.noise_freq
if cA is None:
cA = npy.broadcast_arrays(npy.array([[0., 0.], [0., 0.]]), ntwkB.noise)[0]
if cB is None:
cB = npy.broadcast_arrays(npy.array([[0., 0.], [0., 0.]]), ntwkA.noise)[0]
if k == 0:
# if we're connecting to the "input" port of ntwkA, recalculate the equivalent noise of ntwkA,
# since we're modeling the noise as a pair of sources at the "input" port
# TODO
raise (NotImplementedError)
if l == 1:
# if we're connecting to the "output" port of ntwkB, recalculate the equivalent noise,
# since we're modeling the noise as a pair of sources at the "input" port
# TODO
raise (NotImplementedError)
# interpolate abcd into the set of noise frequencies
if ntwkA.deembed :
if ntwkA.frequency.f.size > 1 :
a_real = interp1d(ntwkA.frequency.f, ntwkA.inv.a.real,
axis=0, kind=Network.noise_interp_kind)
a_imag = interp1d(ntwkA.frequency.f, ntwkA.inv.a.imag,
axis=0, kind=Network.noise_interp_kind)
a = a_real(noise_freq.f) + 1.j * a_imag(noise_freq.f)
else :
a_real = ntwkA.inv.a.real
a_imag = ntwkA.inv.a.imag
a = a_real + 1.j * a_imag
a = npy_inv(a)
a_H = npy.conj(a.transpose(0, 2, 1))
cC = npy.matmul(a, npy.matmul(cB -cA, a_H))
else :
if ntwkA.frequency.f.size > 1 :
a_real = interp1d(ntwkA.frequency.f, ntwkA.a.real,
axis=0, kind=Network.noise_interp_kind)
a_imag = interp1d(ntwkA.frequency.f, ntwkA.a.imag,
axis=0, kind=Network.noise_interp_kind)
a = a_real(noise_freq.f) + 1.j * a_imag(noise_freq.f)
else :
a_real = ntwkA.a.real
a_imag = ntwkA.a.imag
a = a_real + 1.j * a_imag
a_H = npy.conj(a.transpose(0, 2, 1))
cC = npy.matmul(a, npy.matmul(cB, a_H)) + cA
ntwkC.noise = cC
ntwkC.noise_freq = noise_freq
return ntwkC
def connect_fast(ntwkA, k, ntwkB, l):
"""
Connect two n-port networks together (using C-implementation)
Specifically, connect port `k` on `ntwkA` to port `l` on
`ntwkB`. The resultant network has
(ntwkA.nports+ntwkB.nports-2) ports. The port indices ('k','l')
start from 0. Port impedances **are** taken into account.
Parameters
-----------
ntwkA : :class:`Network`
network 'A'
k : int
starting port index on `ntwkA` ( port indices start from 0 )
ntwkB : :class:`Network`
network 'B'
l : int
starting port index on `ntwkB`
Returns
---------
ntwkC : :class:`Network`
new network of rank (ntwkA.nports + ntwkB.nports - 2)
See Also
-----------
:mod:`skrf.src`
Notes
-------
the effect of mis-matched port impedances is handled by inserting
a 2-port 'mismatch' network between the two connected ports.
This mismatch Network is calculated with the
:func:`impedance_mismatch` function.
Examples
---------
To implement a *cascade* of two networks
>>> ntwkA = rf.Network('ntwkA.s2p')
>>> ntwkB = rf.Network('ntwkB.s2p')
>>> ntwkC = rf.connect_fast(ntwkA, 1, ntwkB, 0)
"""
num = 1
from .src import connect_s_fast
# some checking
check_frequency_equal(ntwkA, ntwkB)
# create output Network, from copy of input
ntwkC = ntwkA.copy()
# if networks' z0's are not identical, then connect an impedance
# mismatch, which takes into account the effect of differing port
# impedances.
if not assert_z0_at_ports_equal(ntwkA, k, ntwkB, l):
ntwkC.s = connect_s(
ntwkA.s, k,
impedance_mismatch(ntwkA.z0[:, k], ntwkB.z0[:, l]), 0)
# the connect_s() put the mismatch's output port at the end of
# ntwkC's ports. Fix the new port's impedance, then insert it
# at position k where it belongs.
ntwkC.z0[:, k:] = npy.hstack((ntwkC.z0[:, k + 1:], ntwkB.z0[:, [l]]))
ntwkC.renumber(from_ports=[ntwkC.nports - 1] + list(range(k, ntwkC.nports - 1)),
to_ports=list(range(k, ntwkC.nports)))
# call s-matrix connection function
ntwkC.s = connect_s_fast(ntwkC.s, k, ntwkB.s, l)
# combine z0 arrays and remove ports which were `connected`
ntwkC.z0 = npy.hstack(
(npy.delete(ntwkA.z0, range(k, k + num), 1), npy.delete(ntwkB.z0, range(l, l + num), 1)))
return ntwkC
def innerconnect(ntwkA, k, l, num=1):
'''
connect ports of a single n-port network.
this results in a (n-2)-port network. remember port indices start
from 0.
Parameters
-----------
ntwkA : :class:`Network`
network 'A'
k,l : int
starting port indices on ntwkA ( port indices start from 0 )
num : int
number of consecutive ports to connect
Returns
---------
ntwkC : :class:`Network`
new network of rank (ntwkA.nports - 2*num)
See Also
-----------
connect_s : actual S-parameter connection algorithm.
innerconnect_s : actual S-parameter connection algorithm.
Notes
-------
a 2-port 'mismatch' network is inserted between the connected ports
if their impedances are not equal.
Examples
---------
To connect ports '0' and port '1' on ntwkA
>>> ntwkA = rf.Network('ntwkA.s3p')
>>> ntwkC = rf.innerconnect(ntwkA, 0,1)
'''
if (k + num - 1 > ntwkA.nports - 1):
raise IndexError('Port `k` out of range')
if (l + num - 1 > ntwkA.nports - 1):
raise IndexError('Port `l` out of range')
# create output Network, from copy of input
ntwkC = ntwkA.copy()
if not (ntwkA.z0[:, k] == ntwkA.z0[:, l]).all():
# connect an impedance mismatch, which takes into account the
# effect of differing port impedances
mismatch = impedance_mismatch(ntwkA.z0[:, k], ntwkA.z0[:, l])
ntwkC.s = connect_s(ntwkA.s, k, mismatch, 0)
# the connect_s() put the mismatch's output port at the end of
# ntwkC's ports. Fix the new port's impedance, then insert it
# at position k where it belongs.
ntwkC.z0[:, k:] = npy.hstack((ntwkC.z0[:, k + 1:], ntwkC.z0[:, [l]]))
ntwkC.renumber(from_ports=[ntwkC.nports - 1] + list(range(k, ntwkC.nports - 1)),
to_ports=list(range(k, ntwkC.nports)))
# call s-matrix connection function
ntwkC.s = innerconnect_s(ntwkC.s, k, l)
# update the characteristic impedance matrix
ntwkC.z0 = npy.delete(ntwkC.z0, list(range(k, k + 1)) + list(range(l, l + 1)), 1)
# recur if we're connecting more than one port
if num > 1:
ntwkC = innerconnect(ntwkC, k, l - 1, num - 1)
return ntwkC
def cascade(ntwkA, ntwkB):
'''
Cascade two 2-port or 2N-port Networks together.
Connects ports N through 2N-1 on `ntwkA` to ports 0 through N-1 of
`ntwkB`. This calls `connect()`, which is a more general function.
Use `Network.renumber` to change port order if needed.
Notes
------
connection diagram:
::
A B
+---------+ +---------+
-|0 N |---|0 N |-
-|1 N+1|---|1 N+1|-
... ... ... ...
-|N-2 2N-2|---|N-2 2N-2|-
-|N-1 2N-1|---|N-1 2N-1|-
+---------+ +---------+
Parameters
-----------
ntwkA : :class:`Network`
network `ntwkA`
ntwkB : Network
network `ntwkB`
Returns
--------
C : Network
the resultant network of ntwkA cascaded with ntwkB
See Also
---------
connect : connects two Networks together at arbitrary ports.
Network.renumber : changes the port order of a network
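Examples
---------
A minimal sketch using the bundled example data:
>>> from skrf.data import wr2p2_line
>>> ntwkC = cascade(wr2p2_line, wr2p2_line)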
'''
if ntwkA.nports < 2:
raise ValueError('nports must be >1')
N = int(ntwkA.nports / 2)
if ntwkB.nports == 1:
# we are terminating an N-port with a 1-port.
# which port on ntwkA to use is ambiguous. choose N
return connect(ntwkA, N, ntwkB, 0)
elif ntwkA.nports % 2 == 0 and ntwkA.nports == ntwkB.nports:
# we have two 2N-port balanced networks
return connect(ntwkA, N, ntwkB, 0, num=N)
elif ntwkA.nports % 2 == 0 and ntwkA.nports == 2 * ntwkB.nports:
# we have a 2N-port balanced network terminated by a N-port network
return connect(ntwkA, N, ntwkB, 0, num=N)
else:
raise ValueError('Port counts are not compatible for cascading; check the port shapes of the Networks')
def cascade_list(l):
"""
cascade a list of 2N-port networks
all networks must have same frequency
Parameters
--------------
l : list-like
(ordered) list of networks
Returns
----------
out : Network
the result of cascading all networks in the list `l`
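Examples
---------
A minimal sketch using the bundled example data:
>>> from skrf.data import wr2p2_line
>>> out = cascade_list([wr2p2_line] * 3)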
"""
return reduce(cascade, l)
def de_embed(ntwkA, ntwkB):
'''
De-embed `ntwkA` from `ntwkB`.
This calls `ntwkA.inv ** ntwkB`. The syntax of cascading an inverse
is more explicit, so it is recommended over this function.
Parameters
-----------
ntwkA : :class:`Network`
network `ntwkA`
ntwkB : :class:`Network`
network `ntwkB`
Returns
--------
C : Network
the resultant network of ntwkA de-embedded from ntwkB
See Also
---------
connect : connects two Networks together at arbitrary ports.
'''
return ntwkA.inv ** ntwkB
def stitch(ntwkA, ntwkB, **kwargs):
'''
Stitches ntwkA and ntwkB together.
Concatenates two networks' data. Given two networks that cover
different frequency bands this can be used to combine their data
into a single network.
Parameters
------------
ntwkA, ntwkB : :class:`Network` objects
Networks to stitch together
\*\*kwargs : keyword args
passed to :class:`Network` constructor, for output network
Returns
---------
ntwkC : :class:`Network`
result of stitching the networks `ntwkA` and `ntwkB` together
Examples
----------
>>> from skrf.data import wr2p2_line, wr1p5_line
>>> rf.stitch(wr2p2_line, wr1p5_line)
2-Port Network: 'wr2p2,line', 330-750 GHz, 402 pts, z0=[ 50.+0.j 50.+0.j]
'''
A, B = ntwkA, ntwkB
C = Network(
frequency=Frequency.from_f(npy.r_[A.f[:], B.f[:]], unit='hz'),
s=npy.r_[A.s, B.s],
z0=npy.r_[A.z0, B.z0],
name=A.name,
**kwargs
)
C.frequency.unit = A.frequency.unit
return C
def overlap(ntwkA, ntwkB):
'''
Returns the overlapping parts of two Networks, interpolating if needed.
If frequency vectors for each ntwk don't perfectly overlap, then
ntwkB is interpolated so that the resultant networks have identical
frequencies.
Parameters
------------
ntwkA : :class:`Network`
a ntwk which overlaps `ntwkB`. (the `dominant` network)
ntwkB : :class:`Network`
a ntwk which overlaps `ntwkA`.
Returns
-----------
ntwkA_new : :class:`Network`
part of `ntwkA` that overlapped `ntwkB`
ntwkB_new : :class:`Network`
part of `ntwkB` that overlapped `ntwkA`, possibly interpolated
See Also
------------
:func:`skrf.frequency.overlap_freq`
'''
new_freq = ntwkA.frequency.overlap(ntwkB.frequency)
return ntwkA.interpolate(new_freq), ntwkB.interpolate(new_freq)
def concat_ports(ntwk_list, port_order='second', *args, **kw):
'''
Concatenate networks along the port axis
Notes
-------
`port_order='first'` means front-to-back, while
`port_order='second'` means left-to-right. So, for example, when
concatenating two 2-port networks, `A` and `B`, the ports are ordered as follows:
'first'
a0 o---o a1 -> 0 o---o 1
b0 o---o b1 -> 2 o---o 3
'second'
a0 o---o a1 -> 0 o---o 2
b0 o---o b1 -> 1 o---o 3
use `Network.renumber` to change port ordering.
Parameters
-----------
ntwk_list : list of skrf.Networks
ntwks to concatenate
port_order : ['first', 'second']
Examples
-----------
>>> concat_ports([ntwkA, ntwkB])
>>> concat_ports([ntwkA, ntwkB, ntwkC, ntwkD], port_order='second')
To put four lines in parallel
>>> from skrf import air
>>> l1 = air.line(100, z0=[0,1])
>>> l2 = air.line(300, z0=[2,3])
>>> l3 = air.line(400, z0=[4,5])
>>> l4 = air.line(400, z0=[6,7])
>>> concat_ports([l1,l2,l3,l4], port_order='second')
See Also
--------
stitch : concatenate two networks along the frequency axis
renumber : renumber ports
'''
# if ntwk list is longer than 2, recursively call myself
# until we are done
if len(ntwk_list) > 2:
f = lambda x, y: concat_ports([x, y], port_order='first')
out = reduce(f, ntwk_list)
# if we want to renumber ports, we have to wait
# until after the recursive calls
if port_order == 'second':
N = out.nports
old_order = list(range(N))
new_order = list(range(0, N, 2)) + list(range(1, N, 2))
out.renumber(new_order, old_order)
return out
ntwkA, ntwkB = ntwk_list
if ntwkA.frequency != ntwkB.frequency:
raise ValueError('ntwks don\'t have matching frequencies')
A = ntwkA.s
B = ntwkB.s
nf = A.shape[0] # num frequency points
nA = A.shape[1] # num ports on A
nB = B.shape[1] # num ports on B
nC = nA + nB # num ports on C
# create composite matrix, appending each sub-matrix diagonally
C = npy.zeros((nf, nC, nC), dtype='complex')
C[:, :nA, :nA] = A.copy()
C[:, nA:, nA:] = B.copy()
ntwkC = ntwkA.copy()
ntwkC.s = C
ntwkC.z0 = npy.hstack([ntwkA.z0, ntwkB.z0])
if port_order == 'second':
old_order = list(range(nC))
new_order = list(range(0, nC, 2)) + list(range(1, nC, 2))
ntwkC.renumber(old_order, new_order)
return ntwkC
def average(list_of_networks, polar=False):
'''
Calculates the average network from a list of Networks.
This is complex average of the s-parameters for a list of Networks.
Parameters
-----------
list_of_networks : list of :class:`Network` objects
the list of networks to average
polar : bool
if True, average the magnitude and phase components separately
(not implemented yet); otherwise average the real and imaginary parts
Returns
---------
ntwk : :class:`Network`
the resultant averaged Network
Notes
------
This same function can be accomplished with properties of a
:class:`~skrf.networkset.NetworkSet` class.
Examples
---------
>>> ntwk_list = [rf.Network('myntwk.s1p'), rf.Network('myntwk2.s1p')]
>>> mean_ntwk = rf.average(ntwk_list)
'''
out_ntwk = list_of_networks[0].copy()
if polar:
# average the mag/phase components individually
raise NotImplementedError
else:
# average the re/im components individually
for a_ntwk in list_of_networks[1:]:
out_ntwk += a_ntwk
out_ntwk.s = out_ntwk.s / (len(list_of_networks))
return out_ntwk
def one_port_2_two_port(ntwk):
'''
calculates the two-port network given a symmetric, reciprocal and
lossless one-port network.
Parameters
-----------
ntwk : :class:`Network`
a symmetric, reciprocal and lossless one-port network.
Returns
---------
ntwk : :class:`Network`
the resultant two-port Network
'''
result = ntwk.copy()
result.s = npy.zeros((result.frequency.npoints, 2, 2), dtype=complex)
s11 = ntwk.s[:, 0, 0]
result.s[:, 0, 0] = s11
result.s[:, 1, 1] = s11
## HACK: TODO: verify this mathematically
result.s[:, 0, 1] = npy.sqrt(1 - npy.abs(s11) ** 2) * \
npy.exp(1j * (
npy.angle(s11) + npy.pi / 2. * (npy.angle(s11) < 0) - npy.pi / 2 * (npy.angle(s11) > 0)))
result.s[:, 1, 0] = result.s[:, 0, 1]
result.z0 = npy.hstack([ntwk.z0,ntwk.z0])
return result
def chopinhalf(ntwk, *args, **kwargs):
'''
Chops a sandwich of identical, reciprocal 2-ports in half.
Given two identical, reciprocal 2-ports measured in series,
this returns one.
Notes
--------
In other words, given
.. math::
B = A \\cdot A
Return A, where port 2 of the first A is connected to port 1 of the
second A. The result may be found through signal flow graph analysis
and is,
.. math::
a_{11} = \\frac{b_{11}}{1+b_{12}}
a_{22} = \\frac{b_{22}}{1+b_{12}}
a_{12}^2 = b_{21}\\left(1-\\frac{b_{11}b_{22}}{(1+b_{12})^2}\\right)
Parameters
------------
ntwk : :class:`Network`
a 2-port that is equal to two identical two-ports in cascade
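Examples
---------
A minimal sketch (illustrative only; assumes `A` is a reciprocal
2-port :class:`Network`):
>>> B = A ** A               # two identical 2-ports in cascade
>>> A_again = chopinhalf(B)  # recover one of them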
'''
if ntwk.nports != 2:
raise ValueError('Only valid on 2ports')
b11, b22, b12 = ntwk.s11, ntwk.s22, ntwk.s12
kwargs['name'] = kwargs.get('name', ntwk.name)
a11 = b11 / (1 + b12)
a22 = b22 / (1 + b12)
a21 = b12 * (1 - b11 * b22 / (1 + b12) ** 2) # this is a21^2 here
a21.s = mf.sqrt_phase_unwrap(a21.s)
A = n_oneports_2_nport([a11, a21, a21, a22], *args, **kwargs)
return A
def evenodd2delta(n, z0=50, renormalize=True, doublehalf=True):
'''
Convert ntwk's s-matrix from even/odd mode into a delta (normal) s-matrix
This assumes even/odd ports are ordered [1e,1o,2e,2o]
This is useful for handling coupler sims. Only 4-ports supported for now.
Parameters
----------
n : skrf.Network
Network with an even/odd mode s-matrix
z0: number, list of numbers
the characteristic impedance to set the output network's port
impedances to, and used to renormalize the s-matrix before
conversion if `renormalize`=True.
renormalize : Bool
if the impedances are in even/odd, they must be renormalized to
get the correct transformation
doublehalf: Bool
convert even/odd impedances to double/half their values. This is
required if the data comes from HFSS wave ports.
Returns
----------
out: skrf.Network
same network as `n` but with s-matrix in normal delta basis
See Also
----------
Network.se2gmm, Network.gmm2se
'''
# move even and odd ports, so we have even and odd
# s-matrices contiguous
n_eo = n.copy()
n_eo.renumber([0,1,2,3],[0,2,1,3])
if doublehalf:
n_eo.z0 = n_eo.z0*[2,2,.5,.5]
# if the n_eo s-matrix is given with e/o z0's we need
# to renormalize into 50 Ohm
if renormalize:
n_eo.renormalize(z0)
even = n_eo.s[:,0:2,0:2]
odd = n_eo.s[:,2:4,2:4]
# compute sub-networks for symmetric 4port
s_a = .5*(even+odd)
s_b = .5*(even-odd)
# create output network
n_delta = n_eo.copy()
n_delta.s[:,0:2,0:2] = n_delta.s[:,2:4,2:4] = s_a
n_delta.s[:,2:4,0:2] = n_delta.s[:,0:2,2:4] = s_b
n_delta.z0=z0
return n_delta
## Building composite networks from sub-networks
def n_oneports_2_nport(ntwk_list, *args, **kwargs):
'''
Builds a N-port Network from list of N one-ports
Parameters
-----------
ntwk_list : list of :class:`Network` objects
must follow left-right, top-bottom order, i.e., s11, s12, s21, s22
\*args, \*\*kwargs :
passed to :func:`Network.__init__` for the N-port
Returns
----------
nport : n-port :class:`Network`
result
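Examples
---------
A minimal sketch (illustrative only; assumes `ntwk` is a 2-port
:class:`Network` whose four one-port sub-networks are recombined):
>>> two_port = n_oneports_2_nport([ntwk.s11, ntwk.s12, ntwk.s21, ntwk.s22])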
'''
nports = int(npy.sqrt(len(ntwk_list)))
s_out = npy.concatenate(
[npy.concatenate(
[ntwk_list[(k + (l * nports))].s for k in range(nports)], 2) \
for l in range(nports)], 1)
z0 = npy.concatenate(
[ntwk_list[k].z0 for k in range(0, nports ** 2, nports + 1)], 1)
frequency = ntwk_list[0].frequency
return Network(s=s_out, z0=z0, frequency=frequency, *args, **kwargs)
def n_twoports_2_nport(ntwk_list, nports, offby=1, **kwargs):
'''
Builds a N-port Network from list of two-ports
This method was made to reconstruct a n-port network from 2-port
subnetworks as measured by a 2-port VNA. So, for example, given a
3-port DUT, you might measure the set p12.s2p, p23.s2p, p13.s2p.
From these measurements, you can construct p.s3p.
By default all entries of result.s are filled with 0's, in case you
don't fully specify the entire s-matrix of the resultant ntwk.
Parameters
-----------
ntwk_list : list of :class:`Network` objects
the names must contain the port index, i.e. 'p12' or 'p43'
nports : int
number of ports in the resultant Network
offby : int
starting value for the s-parameter indices. i.e. a value of `1`
assumes that s21 = ntwk.s[:,1,0]
\*args, \*\*kwargs :
passed to :func:`Network.__init__` for the N-port
Returns
----------
nport : n-port :class:`Network`
result
See Also
--------
concat_ports : concatenate ntwks along their ports
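Examples
---------
A minimal sketch (illustrative only; the file names are hypothetical
2-port measurements whose names carry the port indices):
>>> subs = [rf.Network('p12.s2p'), rf.Network('p13.s2p'), rf.Network('p23.s2p')]
>>> three_port = n_twoports_2_nport(subs, nports=3)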
'''
frequency = ntwk_list[0].frequency
nport = Network(frequency=frequency,
s=npy.zeros(shape=(frequency.npoints, nports, nports)),
**kwargs)
for subntwk in ntwk_list:
for m, n in nport.port_tuples:
if m != n and m > n:
if '%i%i' % (m + offby, n + offby) in subntwk.name:
pass
elif '%i%i' % (n + offby, m + offby) in subntwk.name:
subntwk = subntwk.flipped()
else:
continue
for mn, jk in zip(product((m, n), repeat=2), product((0, 1), repeat=2)):
m, n, j, k = mn[0], mn[1], jk[0], jk[1]
nport.s[:, m, n] = subntwk.s[:, j, k]
nport.z0[:, m] = subntwk.z0[:, j]
return nport
def four_oneports_2_twoport(s11, s12, s21, s22, *args, **kwargs):
'''
Builds a 2-port Network from list of four 1-ports
Parameters
-----------
s11 : one-port :class:`Network`
s11
s12 : one-port :class:`Network`
s12
s21 : one-port :class:`Network`
s21
s22 : one-port :class:`Network`
s22
\*args, \*\*kwargs :
passed to :func:`Network.__init__` for the twoport
Returns
----------
twoport : two-port :class:`Network`
result
See Also
-----------
n_oneports_2_nport
three_twoports_2_threeport
'''
return n_oneports_2_nport([s11, s12, s21, s22], *args, **kwargs)
def three_twoports_2_threeport(ntwk_triplet, auto_order=True, *args,
**kwargs):
'''
Creates 3-port from three 2-port Networks
This function provides a convenient way to build a 3-port Network
from a set of 2-port measurements. Which may occur when measuring
a three port device on a 2-port VNA.
Notes
---------
if `auto_order` is False, ntwk_triplet must be of port orderings:
[p12, p13, p23]
else if `auto_order` is True, then the 3 Networks in ntwk_triplet must
contain port identification in their names.
For example, their names may be like `me12`, `me13`, `me23`
Parameters
--------------
ntwk_triplet : list of 2-port Network objects
list of three 2-ports. see notes about order.
auto_order : bool
if True attempt to inspect port orderings from Network names.
Names must be like 'p12', 'p23', etc
\*args,\*\*kwargs :
passed to :func:`Network.__init__` for resultant network
Returns
------------
threeport : 3-port Network
See Also
-----------
n_oneports_2_nport
Examples
-----------
>>> rf.three_twoports_2_threeport(list(rf.read_all('.').values()))
'''
warnings.warn('Use n_twoports_2_nport instead', DeprecationWarning)
if auto_order:
p12, p13, p23 = None, None, None
s11, s12, s13, s21, s22, s23, s31, s32, s33 = None, None, None, None, None, None, None, None, None
for k in ntwk_triplet:
if '12' in k.name:
p12 = k
elif '13' in k.name:
p13 = k
elif '23' in k.name:
p23 = k
elif '21' in k.name:
p12 = k.flipped()
elif '31' in k.name:
p13 = k.flipped()
elif '32' in k.name:
p23 = k.flipped()
else:
p12, p13, p23 = ntwk_triplet
p21 = p12.flipped()
p31 = p13.flipped()
p32 = p23.flipped()
if p12 is not None:
s11 = p12.s11
s12 = p12.s12
s21 = p12.s21
s22 = p12.s22
if p13 is not None:
s11 = p13.s11
s13 = p13.s12
s31 = p13.s21
s33 = p13.s22
if p23 is not None:
s22 = p23.s11
s23 = p23.s12
s32 = p23.s21
s33 = p23.s22
ntwk_list = [s11, s12, s13, s21, s22, s23, s31, s32, s33]
for k in range(len(ntwk_list)):
if ntwk_list[k] is None:
frequency = ntwk_triplet[0].frequency
s = npy.zeros((len(ntwk_triplet[0]), 1, 1))
ntwk_list[k] = Network(s=s, frequency=frequency)
threeport = n_oneports_2_nport(ntwk_list, *args, **kwargs)
return threeport
## Functions operating on s-parameter matrices
def connect_s(A, k, B, l):
'''
connect two n-port networks' s-matrices together.
specifically, connect port `k` on network `A` to port `l` on network
`B`. The resultant network has nports = (A.rank + B.rank-2). This
function operates on, and returns s-matrices. The function
:func:`connect` operates on :class:`Network` types.
Parameters
-----------
A : :class:`numpy.ndarray`
S-parameter matrix of `A`, shape is fxnxn
k : int
port index on `A` (port indices start from 0)
B : :class:`numpy.ndarray`
S-parameter matrix of `B`, shape is fxnxn
l : int
port index on `B`
Returns
-------
C : :class:`numpy.ndarray`
new S-parameter matrix
Notes
-------
internally, this function creates a larger composite network
and calls the :func:`innerconnect_s` function. see that function for more
details about the implementation
See Also
--------
connect : operates on :class:`Network` types
innerconnect_s : function which implements the connection algorithm
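Examples
---------
A minimal sketch: cascading two ideal thrus yields an ideal thru again.
>>> import numpy as npy
>>> A = npy.array([[[0, 1], [1, 0]]], dtype=complex)  # one frequency point
>>> C = connect_s(A, 1, A, 0)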
'''
if k > A.shape[-1] - 1 or l > B.shape[-1] - 1:
raise (ValueError('port indices are out of range'))
nf = A.shape[0] # num frequency points
nA = A.shape[1] # num ports on A
nB = B.shape[1] # num ports on B
nC = nA + nB # num ports on C
# create composite matrix, appending each sub-matrix diagonally
C = npy.zeros((nf, nC, nC), dtype='complex')
C[:, :nA, :nA] = A.copy()
C[:, nA:, nA:] = B.copy()
# call innerconnect_s() on composite matrix C
return innerconnect_s(C, k, nA + l)
def innerconnect_s(A, k, l):
'''
connect two ports of a single n-port network's s-matrix.
Specifically, connect port `k` to port `l` on `A`. This results in
a (n-2)-port network. This function operates on, and returns
s-matrices. The function :func:`innerconnect` operates on
:class:`Network` types.
Parameters
-----------
A : :class:`numpy.ndarray`
S-parameter matrix of `A`, shape is fxnxn
k : int
port index on `A` (port indices start from 0)
l : int
port index on `A`
Returns
-------
C : :class:`numpy.ndarray`
new S-parameter matrix
Notes
-----
The algorithm used to calculate the resultant network is called a
'sub-network growth', can be found in [#]_. The original paper
describing the algorithm is given in [#]_.
References
----------
.. [#] Compton, R.C.; , "Perspectives in microwave circuit analysis," Circuits and Systems, 1989., Proceedings of the 32nd Midwest Symposium on , vol., no., pp.716-718 vol.2, 14-16 Aug 1989. URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=101955&isnumber=3167
.. [#] Filipsson, Gunnar; , "A New General Computer Algorithm for S-Matrix Calculation of Interconnected Multiports," Microwave Conference, 1981. 11th European , vol., no., pp.700-704, 7-11 Sept. 1981. URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4131699&isnumber=4131585
'''
if k > A.shape[-1] - 1 or l > A.shape[-1] - 1:
raise (ValueError('port indices are out of range'))
nA = A.shape[1] # num of ports on input s-matrix
# create an empty s-matrix, to store the result
C = npy.zeros(shape=A.shape, dtype='complex')
# loop through ports and calculate the resultant s-parameters
for i in range(nA):
for j in range(nA):
C[:, i, j] = \
A[:, i, j] + \
(A[:, k, j] * A[:, i, l] * (1 - A[:, l, k]) + \
A[:, l, j] * A[:, i, k] * (1 - A[:, k, l]) + \
A[:, k, j] * A[:, l, l] * A[:, i, k] + \
A[:, l, j] * A[:, k, k] * A[:, i, l]) / \
((1 - A[:, k, l]) * (1 - A[:, l, k]) - A[:, k, k] * A[:, l, l])
# remove ports that were `connected`
C = npy.delete(C, (k, l), 1)
C = npy.delete(C, (k, l), 2)
return C
## network parameter conversion
def s2z(s, z0=50, s_def=S_DEF_DEFAULT):
'''
Convert scattering parameters [1]_ to impedance parameters [2]_
For power-waves, Eq.(19) from [3]:
.. math::
Z = F^{-1} (1 - S)^{-1} (S G + G^*) F
where :math:`G = diag([Z_0])` and :math:`F = diag([1/2\\sqrt{|Re(Z_0)|}])`
For pseudo-waves, Eq.(74) from [4]:
.. math::
Z = (1 - U^{-1} S U)^{-1} (1 + U^{-1} S U) G
where :math:`U = \\sqrt{Re(Z_0)}/|Z_0|`
Parameters
------------
s : complex array-like
scattering parameters
z0 : complex array-like or number
port impedances.
s_def : str, can be: 'power', 'pseudo' or 'traveling'
Scattering parameter definition : 'power' for power-waves definition [3],
'pseudo' for pseudo-waves definition [4].
'traveling' corresponds to the initial implementation.
Default is 'power'.
Returns
---------
z : complex array-like
impedance parameters
References
----------
.. [1] http://en.wikipedia.org/wiki/S-parameters
.. [2] http://en.wikipedia.org/wiki/impedance_parameters
.. [3] Kurokawa, Kaneyuki "Power waves and the scattering matrix", IEEE Transactions on Microwave Theory and Techniques, vol.13, iss.2, pp. 194–202, March 1965.
.. [4] Marks, R. B. and Williams, D. F. "A general waveguide circuit theory", Journal of Research of National Institute of Standard and Technology, vol.97, iss.5, pp. 533–562, 1992.
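Examples
---------
A 1-port sketch (illustrative only): a reflection coefficient of 0.5
referenced to a real 50 Ohm port corresponds to 150 Ohm.
>>> import numpy as npy
>>> s = npy.array([[[0.5 + 0.j]]])   # shape (nfreqs, nports, nports)
>>> z = s2z(s, z0=50)                # -> approximately 150 Ohm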
'''
nfreqs, nports, nports = s.shape
z0 = fix_z0_shape(z0, nfreqs, nports)
# Add a small real part in case of pure imaginary char impedance
# to prevent numerical errors for both pseudo and power waves definitions
z0 = z0.astype(dtype=npy.complex128)
z0[z0.real == 0] += ZERO
s = s.copy() # to prevent the original array from being altered
s[s == -1.] = -1. + 1e-12 # solve numerical singularity
s[s == 1.] = 1. + 1e-12 # solve numerical singularity
# The following is a vectorized version of a for loop for all frequencies.
# # Creating Identity matrices of shape (nports,nports) for each nfreqs
Id = npy.zeros_like(s) # (nfreqs, nports, nports)
npy.einsum('ijj->ij', Id)[...] = 1.0
if s_def == 'power':
# Power-waves. Eq.(19) from [3]
# Creating diagonal matrices of shape (nports,nports) for each nfreqs
F, G = npy.zeros_like(s), npy.zeros_like(s)
npy.einsum('ijj->ij', F)[...] = 1.0/npy.sqrt(z0.real)*0.5
npy.einsum('ijj->ij', G)[...] = z0
# z = npy.linalg.inv(F) @ npy.linalg.inv(Id - s) @ (s @ G + npy.conjugate(G)) @ F # Python > 3.5
z = npy.matmul(npy.linalg.inv(F),
npy.matmul(npy.linalg.inv(Id - s),
npy.matmul(npy.matmul(s, G) + npy.conjugate(G), F)))
elif s_def == 'pseudo':
# Pseudo-waves. Eq.(74) from [4]
# Creating diagonal matrices of shape (nports,nports) for each nfreqs
ZR, U = npy.zeros_like(s), npy.zeros_like(s)
npy.einsum('ijj->ij', U)[...] = npy.sqrt(z0.real)/npy.abs(z0)
npy.einsum('ijj->ij', ZR)[...] = z0
# USU = npy.linalg.inv(U) @ s @ U
# z = npy.linalg.inv(Id - USU) @ (Id + USU) @ ZR
USU = npy.matmul(npy.linalg.inv(U), npy.matmul(s , U))
z = npy.matmul(npy.linalg.inv(Id - USU), npy.matmul((Id + USU), ZR))
elif s_def == 'traveling':
# Traveling-waves definition. Cf.Wikipedia "Impedance parameters" page.
# Creating diagonal matrices of shape (nports, nports) for each nfreqs
sqrtz0 = npy.zeros_like(s) # (nfreqs, nports, nports)
npy.einsum('ijj->ij', sqrtz0)[...] = npy.sqrt(z0)
# s -> z
z = npy.zeros_like(s)
# z = sqrtz0 @ npy.linalg.inv(Id - s) @ (Id + s) @ sqrtz0 # Python>3.5
z = npy.matmul(npy.matmul(npy.matmul(sqrtz0, npy.linalg.inv(Id - s)), (Id + s)), sqrtz0)
return z
def s2y(s, z0=50, s_def=S_DEF_DEFAULT):
"""
convert scattering parameters [#]_ to admittance parameters [#]_
Equations are the inverse of :func:`s2z`.
Parameters
------------
s : complex array-like
scattering parameters
z0 : complex array-like or number
port impedances
s_def : str, can be: 'power', 'pseudo' or 'traveling'
Scattering parameter definition : 'power' for power-waves definition [3],
'pseudo' for pseudo-waves definition [4].
'traveling' corresponds to the initial implementation.
Default is 'power'.
Returns
---------
y : complex array-like
admittance parameters
See Also
----------
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2z
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
----------
.. [#] http://en.wikipedia.org/wiki/S-parameters
.. [#] http://en.wikipedia.org/wiki/Admittance_parameters
.. [3] Kurokawa, Kaneyuki "Power waves and the scattering matrix", IEEE Transactions on Microwave Theory and Techniques, vol.13, iss.2, pp. 194–202, March 1965.
.. [4] Marks, R. B. and Williams, D. F. "A general waveguide circuit theory", Journal of Research of National Institute of Standard and Technology, vol.97, iss.5, pp. 533–562, 1992.
"""
nfreqs, nports, nports = s.shape
z0 = fix_z0_shape(z0, nfreqs, nports)
# Add a small real part in case of pure imaginary char impedance
# to prevent numerical errors for both pseudo and power waves definitions
z0 = z0.astype(dtype=npy.complex128)
z0[z0.real == 0] += ZERO
s = s.copy() # to prevent the original array from being altered
s[s == -1.] = -1. + 1e-12 # solve numerical singularity
s[s == 1.] = 1. + 1e-12 # solve numerical singularity
# The following is a vectorized version of a for loop for all frequencies.
# Creating Identity matrices of shape (nports,nports) for each nfreqs
Id = npy.zeros_like(s) # (nfreqs, nports, nports)
npy.einsum('ijj->ij', Id)[...] = 1.0
if s_def == 'power':
# Power-waves. Inverse of Eq.(19) from [3]
# Creating diagonal matrices of shape (nports,nports) for each nfreqs
F, G = npy.zeros_like(s), npy.zeros_like(s)
npy.einsum('ijj->ij', F)[...] = 1.0/npy.sqrt(z0.real)*0.5
npy.einsum('ijj->ij', G)[...] = z0
# y = npy.linalg.inv(F) @ npy.linalg.inv((s @ G + npy.conjugate(G))) @ (Id - s) @ F # Python > 3.5
y = npy.matmul(npy.linalg.inv(F),
npy.matmul(npy.linalg.inv(npy.matmul(s, G) + npy.conjugate(G)),
npy.matmul((Id - s), F)))
elif s_def == 'pseudo':
# pseudo-waves. Inverse of Eq.(74) from [4]
YR, U = npy.zeros_like(s), npy.zeros_like(s)
npy.einsum('ijj->ij', U)[...] = npy.sqrt(z0.real)/npy.abs(z0)
npy.einsum('ijj->ij', YR)[...] = 1/z0
# USU = npy.linalg.inv(U) @ s @ U
# y = YR @ npy.linalg.inv(Id + USU) @ (Id - USU)
USU = npy.matmul(npy.linalg.inv(U), npy.matmul(s, U))
y = npy.matmul(YR, npy.matmul(npy.linalg.inv(Id + USU), (Id - USU)))
elif s_def == 'traveling':
# Traveling-waves definition. Cf.Wikipedia "Impedance parameters" page.
# Creating diagonal matrices of shape (nports, nports) for each nfreqs
sqrty0 = npy.zeros_like(s) # (nfreqs, nports, nports)
npy.einsum('ijj->ij', sqrty0)[...] = npy.sqrt(1.0/z0)
# s -> y
y = npy.zeros_like(s)
# y = sqrty0 @ (Id - s) @ npy.linalg.inv(Id + s) @ sqrty0 # Python>3.5
y = npy.matmul(npy.matmul(npy.matmul(sqrty0, (Id - s)), npy.linalg.inv(Id + s)), sqrty0)
return y
def s2t(s):
"""
Converts scattering parameters [#]_ to scattering transfer parameters [#]_ .
Transfer parameters are also referred to as the
'wave cascading matrix'. This function only operates on 2N-port
networks with the same number of input and output ports, also known as
'balanced networks'.
Parameters
-----------
s : :class:`numpy.ndarray` (shape fx2nx2n)
scattering parameter matrix
Returns
-------
t : numpy.ndarray
scattering transfer parameters (aka wave cascading matrix)
See Also
---------
inv : calculates inverse s-parameters
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2z
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
-----------
.. [#] http://en.wikipedia.org/wiki/S-parameters
.. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
.. [#] Janusz A. Dobrowolski, "Scattering Parameter in RF and Microwave Circuit Analysis and Design",
Artech House, 2016, pp. 65-68
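Examples
---------
A minimal sketch: the wave cascading matrix of an ideal thru is the
identity matrix.
>>> import numpy as npy
>>> s = npy.array([[[0, 1], [1, 0]]], dtype=complex)
>>> t = s2t(s)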
"""
z, y, x = s.shape
# test here for even number of ports.
# s-parameter networks are square matrix, so x and y are equal.
if(x % 2 != 0):
raise IndexError('Network does not have an even number of ports')
t = npy.zeros((z, y, x), dtype=complex)
yh = int(y/2)
xh = int(x/2)
# S_II,I^-1
sinv = npy.linalg.inv(s[:, yh:y, 0:xh])
# npy.linalg.inv raises LinAlgError if the matrix is singular (not invertible)
for k in range(len(s)):
# T_I,I = S_I,II - S_I,I . S_II,I^-1 . S_II,II
t[k, 0:yh, 0:xh] = s[k, 0:yh, xh:x] - s[k, 0:yh, 0:xh].dot(sinv[k].dot(s[k, yh:y, xh:x]))
# T_I,II = S_I,I . S_II,I^-1
t[k, 0:yh, xh:x] = s[k, 0:yh, 0:xh].dot(sinv[k])
# T_II,I = -S_II,I^-1 . S_II,II
t[k, yh:y, 0:xh] = -sinv[k].dot(s[k, yh:y, xh:x])
# T_II,II = S_II,I^-1
t[k, yh:y, xh:x] = sinv[k]
return t
def z2s(z, z0=50, s_def=S_DEF_DEFAULT):
"""
convert impedance parameters [1]_ to scattering parameters [2]_
For power-waves, Eq.(18) from [3]:
.. math::
S = F (Z - G^*) (Z + G)^{-1} F^{-1}
where :math:`G = diag([Z_0])` and :math:`F = diag([1/2\\sqrt{|Re(Z_0)|}])`
For pseudo-waves, Eq.(73) from [4]:
.. math::
S = U (Z - G) (Z + G)^{-1} U^{-1}
where :math:`U = \\sqrt{Re(Z_0)}/|Z_0|`
Parameters
------------
z : complex array-like
impedance parameters
z0 : complex array-like or number
port impedances
s_def : str, can be: 'power', 'pseudo' or 'traveling'
Scattering parameter definition : 'power' for power-waves definition [3],
'pseudo' for pseudo-waves definition [4].
'traveling' corresponds to the initial implementation.
Default is 'power'.
Returns
---------
s : complex array-like
scattering parameters
References
----------
.. [1] http://en.wikipedia.org/wiki/impedance_parameters
.. [2] http://en.wikipedia.org/wiki/S-parameters
.. [3] Kurokawa, Kaneyuki "Power waves and the scattering matrix", IEEE Transactions on Microwave Theory and Techniques, vol.13, iss.2, pp. 194–202, March 1965.
.. [4] Marks, R. B. and Williams, D. F. "A general waveguide circuit theory", Journal of Research of National Institute of Standard and Technology, vol.97, iss.5, pp. 533–562, 1992.
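Examples
---------
A 1-port sketch (illustrative only): a 150 Ohm load on a real 50 Ohm
port gives a reflection coefficient of 0.5.
>>> import numpy as npy
>>> z = npy.array([[[150. + 0.j]]])  # shape (nfreqs, nports, nports)
>>> s = z2s(z, z0=50)                # -> approximately 0.5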
"""
nfreqs, nports, nports = z.shape
z0 = fix_z0_shape(z0, nfreqs, nports)
# Add a small real part in case of pure imaginary char impedance
# to prevent numerical errors for both pseudo and power waves definitions
z0 = z0.astype(dtype=npy.complex128)
z0[z0.real == 0] += ZERO
if s_def == 'power':
# Power-waves. Eq.(18) from [3]
# Creating diagonal matrices of shape (nports,nports) for each nfreqs
F, G = npy.zeros_like(z), npy.zeros_like(z)
npy.einsum('ijj->ij', F)[...] = 1.0/npy.sqrt(z0.real)*0.5
npy.einsum('ijj->ij', G)[...] = z0
# s = F @ (z - npy.conjugate(G)) @ npy.linalg.inv(z + G) @ npy.linalg.inv(F) # Python > 3.5
s = npy.matmul(F,
npy.matmul((z - npy.conjugate(G)),
npy.matmul(npy.linalg.inv(z + G), npy.linalg.inv(F))))
elif s_def == 'pseudo':
# Pseudo-waves. Eq.(73) from [4]
# Creating diagonal matrices of shape (nports,nports) for each nfreqs
ZR, U = npy.zeros_like(z), npy.zeros_like(z)
npy.einsum('ijj->ij', U)[...] = npy.sqrt(z0.real)/npy.abs(z0)
npy.einsum('ijj->ij', ZR)[...] = z0
# s = U @ (z - ZR) @ npy.linalg.inv(z + ZR) @ npy.linalg.inv(U) # Python > 3.5
s = npy.matmul(U,
npy.matmul((z - ZR),
npy.matmul(npy.linalg.inv(z + ZR), npy.linalg.inv(U))))
elif s_def == 'traveling':
# Traveling-waves definition. Cf.Wikipedia "Impedance parameters" page.
# Creating Identity matrices of shape (nports,nports) for each nfreqs
Id = npy.zeros_like(z) # (nfreqs, nports, nports)
npy.einsum('ijj->ij', Id)[...] = 1.0
# Creating diagonal matrices of shape (nports, nports) for each nfreqs
sqrty0 = npy.zeros_like(z) # (nfreqs, nports, nports)
npy.einsum('ijj->ij', sqrty0)[...] = npy.sqrt(1.0/z0)
# z -> s
s = npy.zeros_like(z)
# s = (sqrty0 @ z @ sqrty0 - Id) @ npy.linalg.inv(sqrty0 @ z @ sqrty0 + Id) # Python>3.5
s = npy.matmul((npy.matmul(npy.matmul(sqrty0, z), sqrty0) - Id),
npy.linalg.inv(npy.matmul(npy.matmul(sqrty0, z), sqrty0) + Id))
return s
def z2y(z):
'''
convert impedance parameters [#]_ to admittance parameters [#]_
.. math::
y = z^{-1}
Parameters
------------
z : complex array-like
impedance parameters
Returns
---------
y : complex array-like
admittance parameters
See Also
----------
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2z
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
----------
.. [#] http://en.wikipedia.org/wiki/impedance_parameters
.. [#] http://en.wikipedia.org/wiki/Admittance_parameters
'''
return npy.array([npy.linalg.inv(z[f, :, :]) for f in range(z.shape[0])])
def z2t(z):
'''
Not Implemented yet
convert impedance parameters [#]_ to scattering transfer parameters [#]_
Parameters
------------
z : complex array-like or number
impedance parameters
Returns
---------
t : complex array-like or number
scattering transfer parameters
See Also
----------
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2z
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
----------
.. [#] http://en.wikipedia.org/wiki/impedance_parameters
.. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
'''
raise (NotImplementedError)
def a2s(a, z0=50):
'''
convert abcd parameters to s parameters
Parameters
------------
a : complex array-like
abcd parameters
z0 : complex array-like or number
port impedances
Returns
---------
s : complex array-like
scattering parameters
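Examples
---------
A minimal sketch: the identity ABCD matrix is an ideal thru, whose
s-matrix is [[0, 1], [1, 0]] for matched 50 Ohm ports.
>>> import numpy as npy
>>> a = npy.array([[[1, 0], [0, 1]]], dtype=complex)
>>> s = a2s(a, z0=50)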
'''
nfreqs, nports, nports = a.shape
if nports != 2:
raise IndexError('abcd parameters are defined for 2-ports networks only')
z0 = fix_z0_shape(z0, nfreqs, nports)
z01 = z0[:,0]
z02 = z0[:,1]
A = a[:,0,0]
B = a[:,0,1]
C = a[:,1,0]
D = a[:,1,1]
denom = A*z02 + B + C*z01*z02 + D*z01
s = npy.array([
[
(A*z02 + B - C*z01.conj()*z02 - D*z01.conj() ) / denom,
(2*npy.sqrt(z01.real * z02.real)) / denom,
],
[
(2*(A*D - B*C)*npy.sqrt(z01.real * z02.real)) / denom,
(-A*z02.conj() + B - C*z01*z02.conj() + D*z01) / denom,
],
]).transpose()
return s
#return z2s(a2z(a), z0)
def a2z(a):
'''
Converts abcd parameters to z parameters [#]_ .
Parameters
-----------
a : :class:`numpy.ndarray` (shape fx2x2)
abcd parameter matrix
Returns
-------
z : numpy.ndarray
impedance parameters
See Also
---------
inv : calculates inverse s-parameters
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2z
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
-----------
.. [#] https://en.wikipedia.org/wiki/Two-port_network
'''
return z2a(a)
def z2a(z):
'''
Converts impedance parameters to abcd parameters [#]_ .
Parameters
-----------
z : :class:`numpy.ndarray` (shape fx2x2)
impedance parameter matrix
Returns
-------
abcd : numpy.ndarray
abcd parameters
See Also
---------
inv : calculates inverse s-parameters
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2z
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
-----------
.. [#] https://en.wikipedia.org/wiki/Two-port_network
'''
abcd = npy.array([
[z[:, 0, 0] / z[:, 1, 0],
1. / z[:, 1, 0]],
[(z[:, 0, 0] * z[:, 1, 1] - z[:, 1, 0] * z[:, 0, 1]) / z[:, 1, 0],
z[:, 1, 1] / z[:, 1, 0]],
]).transpose()
return abcd
def s2a(s, z0=50):
'''
Converts scattering parameters to abcd parameters [#]_ .
Parameters
-----------
s : :class:`numpy.ndarray` (shape fx2x2)
scattering parameter matrix
z0: number or, :class:`numpy.ndarray` (shape fx2)
port impedance
Returns
-------
abcd : numpy.ndarray
abcd parameters
'''
nfreqs, nports, nports = s.shape
if nports != 2:
raise IndexError('abcd parameters are defined for 2-ports networks only')
z0 = fix_z0_shape(z0, nfreqs, nports)
z01 = z0[:,0]
z02 = z0[:,1]
denom = (2*s[:,1,0]*npy.sqrt(z01.real * z02.real))
a = npy.array([
[
((z01.conj() + s[:,0,0]*z01)*(1 - s[:,1,1]) + s[:,0,1]*s[:,1,0]*z01) / denom,
((1 - s[:,0,0])*(1 - s[:,1,1]) - s[:,0,1]*s[:,1,0]) / denom,
],
[
((z01.conj() + s[:,0,0]*z01)*(z02.conj() + s[:,1,1]*z02) - s[:,0,1]*s[:,1,0]*z01*z02) / denom,
((1 - s[:,0,0])*(z02.conj() + s[:,1,1]*z02) + s[:,0,1]*s[:,1,0]*z02) / denom,
],
]).transpose()
return a
def y2s(y, z0=50, s_def=S_DEF_DEFAULT):
'''
convert admittance parameters [#]_ to scattering parameters [#]_
For power-waves, from [3]:
.. math::
S = F (1 - G Y) (1 + G Y)^{-1} F^{-1}
where :math:`G = diag([Z_0])` and :math:`F = diag([1/2\\sqrt{|Re(Z_0)|}])`
For pseudo-waves, Eq.(73) from [4]:
.. math::
S = U (Y^{-1} - G) (Y^{-1} + G)^{-1} U^{-1}
where :math:`U = \\sqrt{Re(Z_0)}/|Z_0|`
Parameters
------------
y : complex array-like
admittance parameters
z0 : complex array-like or number
port impedances
s_def : str, can be: 'power', 'pseudo' or 'traveling'
Scattering parameter definition : 'power' for power-waves definition [3],
'pseudo' for pseudo-waves definition [4].
'traveling' corresponds to the initial implementation.
Default is 'power'.
Returns
---------
s : complex array-like or number
scattering parameters
See Also
----------
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2z
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
----------
.. [#] http://en.wikipedia.org/wiki/Admittance_parameters
.. [#] http://en.wikipedia.org/wiki/S-parameters
.. [3] Kurokawa, Kaneyuki "Power waves and the scattering matrix", IEEE Transactions on Microwave Theory and Techniques, vol.13, iss.2, pp. 194–202, March 1965.
.. [4] Marks, R. B. and Williams, D. F. "A general waveguide circuit theory", Journal of Research of National Institute of Standard and Technology, vol.97, iss.5, pp. 533–562, 1992.
'''
nfreqs, nports, nports = y.shape
z0 = fix_z0_shape(z0, nfreqs, nports)
# Add a small real part in case of pure imaginary char impedance
# to prevent numerical errors for both pseudo and power waves definitions
z0 = z0.astype(dtype=npy.complex128)
z0[z0.real == 0] += ZERO
# The following is a vectorized version of a for loop for all frequencies.
# Creating Identity matrices of shape (nports,nports) for each nfreqs
Id = npy.zeros_like(y) # (nfreqs, nports, nports)
npy.einsum('ijj->ij', Id)[...] = 1.0
if s_def == 'power':
# Creating diagonal matrices of shape (nports,nports) for each nfreqs
F, G = npy.zeros_like(y), npy.zeros_like(y)
npy.einsum('ijj->ij', F)[...] = 1.0/npy.sqrt(z0.real)*0.5
npy.einsum('ijj->ij', G)[...] = z0
# s = F @ (Id - npy.conjugate(G) @ y) @ npy.linalg.inv(Id + G @ y) @ npy.linalg.inv(F) # Python > 3.5
s = npy.matmul(F,
npy.matmul((Id - npy.matmul(npy.conjugate(G), y)),
npy.matmul(npy.linalg.inv(Id + npy.matmul(G, y)), npy.linalg.inv(F))))
elif s_def == 'pseudo':
# Pseudo-waves
# Creating diagonal matrices of shape (nports,nports) for each nfreqs
ZR, U = npy.zeros_like(y), npy.zeros_like(y)
npy.einsum('ijj->ij', U)[...] = npy.sqrt(z0.real)/npy.abs(z0)
npy.einsum('ijj->ij', ZR)[...] = z0
# s = U @ (npy.linalg.inv(y) - ZR) @ npy.linalg.inv(npy.linalg.inv(y) + ZR) @ npy.linalg.inv(U) # Python > 3.5
s = npy.matmul(U,
npy.matmul((npy.linalg.inv(y) - ZR),
npy.matmul(npy.linalg.inv(npy.linalg.inv(y) + ZR), npy.linalg.inv(U))))
elif s_def == 'traveling':
# Traveling-waves definition. Cf.Wikipedia "Impedance parameters" page.
# Creating diagonal matrices of shape (nports, nports) for each nfreqs
sqrtz0 = npy.zeros_like(y) # (nfreqs, nports, nports)
npy.einsum('ijj->ij', sqrtz0)[...] = npy.sqrt(z0)
# y -> s
s = npy.zeros_like(y)
# s = (Id - sqrtz0 @ y @ sqrtz0) @ npy.linalg.inv(Id + sqrtz0 @ y @ sqrtz0) # Python>3.5
s = npy.matmul( Id - npy.matmul(npy.matmul(sqrtz0, y), sqrtz0),
npy.linalg.inv(Id + npy.matmul(npy.matmul(sqrtz0, y), sqrtz0)))
return s
def y2z(y):
'''
convert admittance parameters [#]_ to impedance parameters [#]_
.. math::
z = y^{-1}
Parameters
------------
y : complex array-like
admittance parameters
Returns
---------
z : complex array-like
impedance parameters
See Also
----------
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2z
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
----------
.. [#] http://en.wikipedia.org/wiki/Admittance_parameters
.. [#] http://en.wikipedia.org/wiki/impedance_parameters
'''
return npy.array([npy.mat(y[f, :, :]) ** -1 for f in xrange(y.shape[0])])
def y2t(y):
'''
Not Implemented Yet
convert admittance parameters [#]_ to scattering-transfer parameters [#]_
Parameters
------------
y : complex array-like or number
impedance parameters
Returns
---------
t : complex array-like or number
scattering parameters
See Also
----------
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2t
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
----------
.. [#] http://en.wikipedia.org/wiki/Admittance_parameters
.. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
'''
raise (NotImplementedError)
def t2s(t):
'''
converts scattering transfer parameters [#]_ to scattering parameters [#]_
transfer parameters are also referred to as
'wave cascading matrix', this function only operates on 2N-ports
networks with same number of input and output ports, also known as
'balanced networks'.
Parameters
-----------
t : :class:`numpy.ndarray` (shape fx2nx2n)
scattering transfer parameters
Returns
-------
s : :class:`numpy.ndarray`
scattering parameter matrix.
See Also
---------
inv : calculates inverse s-parameters
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2t
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
-----------
.. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
.. [#] http://en.wikipedia.org/wiki/S-parameters
.. [#] Janusz A. Dobrowolski, "Scattering Parameter in RF and Microwave Circuit Analysis and Design",
Artech House, 2016, pp. 65-68
'''
z, y, x = t.shape
# test here for even number of ports.
# t-parameter networks are square matrix, so x and y are equal.
if(x % 2 != 0):
raise IndexError('Network doesn\'t have an even number of ports')
s = npy.zeros((z, y, x), dtype=complex)
yh = int(y/2)
xh = int(x/2)
# T_II,II^-1
tinv = npy.linalg.inv(t[:, yh:y, xh:x])
# np.linalg.inv test for singularity (matrix not invertible)
for k in range(len(s)):
# S_I,I = T_I,II . T_II,II^-1
s[k, 0:yh, 0:xh] = t[k, 0:yh, xh:x].dot(tinv[k])
# S_I,II = T_I,I - T_I,II . T_II,II^-1 . T_II,I
s[k, 0:yh, xh:x] = t[k, 0:yh, 0:xh]-t[k, 0:yh, xh:x].dot(tinv[k].dot(t[k, yh:y, 0:xh]))
# S_II,I = T_II,II^-1
s[k, yh:y, 0:xh] = tinv[k]
# S_II,II = -T_II,II^-1 . T_II,I
s[k, yh:y, xh:x] = -tinv[k].dot(t[k, yh:y, 0:xh])
return s
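# Editor's note: a hypothetical sketch (not part of the original module).
# The identity wave-cascading (T) matrix represents an ideal thru, whose
# S-matrix is the anti-diagonal [[0, 1], [1, 0]].
def _example_t2s_identity_is_thru():
    t = npy.eye(2, dtype=complex)[None, :, :]   # shape (1, 2, 2)
    s = t2s(t)
    assert npy.allclose(s[0], npy.array([[0, 1], [1, 0]]))
    return s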
def t2z(t):
'''
Not Implemented Yet
Convert scattering transfer parameters [#]_ to impedance parameters [#]_
Parameters
------------
t : complex array-like or number
impedance parameters
Returns
---------
z : complex array-like or number
scattering parameters
See Also
----------
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2t
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
----------
.. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
.. [#] http://en.wikipedia.org/wiki/impedance_parameters
'''
raise (NotImplementedError)
def t2y(t):
'''
Not Implemented Yet
Convert scattering transfer parameters to admittance parameters [#]_
Parameters
------------
t : complex array-like or number
t-parameters
Returns
---------
y : complex array-like or number
admittance parameters
See Also
----------
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2t
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
----------
.. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
'''
raise (NotImplementedError)
def h2z(h):
'''
Converts hybrid parameters to z parameters [#]_ .
Parameters
-----------
h : :class:`numpy.ndarray` (shape fx2x2)
hybrid parameter matrix
Returns
-------
z : numpy.ndarray
impedance parameters
See Also
---------
inv : calculates inverse s-parameters
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2t
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
-----------
.. [#] https://en.wikipedia.org/wiki/Two-port_network
'''
return z2h(h)
def h2s(h, z0=50):
'''
convert hybrid parameters to s parameters
Parameters
------------
h : complex array-like
hybrid parameters
z0 : complex array-like or number
port impedances
Returns
---------
s : complex array-like
scattering parameters
'''
return z2s(h2z(h), z0)
def s2h(s, z0=50):
'''
Convert scattering parameters [1]_ to hybrid parameters
Parameters
------------
s : complex array-like
scattering parameters
z0 : complex array-like or number
port impedances.
Returns
---------
h : complex array-like
hybrid parameters
References
----------
.. [1] http://en.wikipedia.org/wiki/S-parameters
.. [2] http://en.wikipedia.org/wiki/Two-port_network#Hybrid_parameters_(h-parameters)
'''
return z2h(s2z(s, z0))
def z2h(z):
'''
Converts impedance parameters to hybrid parameters [#]_ .
Parameters
-----------
z : :class:`numpy.ndarray` (shape fx2x2)
impedance parameter matrix
Returns
-------
h : numpy.ndarray
hybrid parameters
See Also
---------
inv : calculates inverse s-parameters
s2z
s2y
s2t
z2s
z2y
z2t
y2s
y2z
y2t
t2s
t2z
t2y
Network.s
Network.y
Network.z
Network.t
References
-----------
.. [#] https://en.wikipedia.org/wiki/Two-port_network
'''
h = npy.array([
[(z[:, 0, 0] * z[:, 1, 1] - z[:, 1, 0] * z[:, 0, 1]) / z[:, 1, 1],
-z[:, 1, 0] / z[:, 1, 1]],
[z[:, 0, 1] / z[:, 1, 1],
1. / z[:, 1, 1]],
]).transpose()
return h
## these methods are used in the secondary properties
def passivity(s):
'''
Passivity metric for a multi-port network.
A metric which is proportional to the amount of power lost in a
multiport network, depending on the excitation port. Specifically,
this returns a matrix whose diagonal elements are equal to the total
power received at all ports, normalized to the power at a single
excitation port.
Mathematically, this is a test for unitarity of the
s-parameter matrix [#]_.
for two port this is
.. math::
\sqrt( |S_{11}|^2 + |S_{21}|^2 \, , \, |S_{22}|^2+|S_{12}|^2)
in general it is
.. math::
\\sqrt( S^H \\cdot S)
where :math:`H` is conjugate transpose of S, and :math:`\\cdot`
is dot product.
Notes
---------
The total amount of power dissipated in a network depends on the
port matches. For example, given a matched attenuator, this metric
will yield the attenuation value. However, if the attenuator is
cascaded with a mismatch, the power dissipated will not be equivalent
to the attenuator value, nor equal for each excitation port.
Returns
---------
passivity : :class:`numpy.ndarray` of shape fxnxn
References
------------
.. [#] http://en.wikipedia.org/wiki/Scattering_parameters#Lossless_networks
'''
if s.shape[-1] == 1:
raise (ValueError('Doesn\'t exist for one ports'))
pas_mat = s.copy()
for f in range(len(s)):
pas_mat[f, :, :] = npy.sqrt(npy.dot(s[f, :, :].conj().T, s[f, :, :]))
return pas_mat
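# Editor's note: a hypothetical sketch (not part of the original module).
# A lossless, matched thru has a unitary S-matrix, so the passivity metric
# is the identity matrix at every frequency.
def _example_passivity_lossless_thru():
    s = npy.array([[[0, 1], [1, 0]]], dtype=complex)
    assert npy.allclose(passivity(s)[0], npy.eye(2))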
def reciprocity(s):
'''
Reciprocity metric for a multi-port network.
This returns the magnitude of the difference between the
s-parameter matrix and its transpose.
for two port this is
.. math::
| S - S^T |
where :math:`T` is transpose of S
Returns
---------
reciprocity : :class:`numpy.ndarray` of shape fxnxn
'''
if s.shape[-1] == 1:
raise (ValueError('Doesn\'t exist for one ports'))
rec_mat = s.copy()
for f in range(len(s)):
rec_mat[f, :, :] = abs(s[f, :, :] - s[f, :, :].T)
return rec_mat
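# Editor's note: a hypothetical sketch (not part of the original module).
# A reciprocal network has a symmetric S-matrix, so the metric is zero.
def _example_reciprocity_symmetric():
    s = npy.array([[[0.1, 0.5], [0.5, 0.2]]], dtype=complex)
    assert npy.allclose(reciprocity(s), 0)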
## renormalize
def renormalize_s(s, z_old, z_new, s_def=S_DEF_DEFAULT):
'''
Renormalize a s-parameter matrix given old and new port impedances
In the Parameters descriptions, F,N,N = shape(s).
Notes
------
This re-normalization assumes power-wave formulation per default.
To use the pseudo-wave formulation, use s_def='pseudo'.
However, results should be the same for real-valued characteristic impedances.
See [1]_ and [2]_ for more details.
Parameters
---------------
s : complex array of shape FxNxN
s-parameter matrix
z_old : complex array of shape FxN, F, N or a scalar
old (original) port impedances
z_new : complex array of shape FxN, F, N or a scalar
new port impedances
s_def : str, optional
Scattering parameter definition : 'power' for power-waves definition,
'pseudo' for pseudo-waves definition.
'traveling' corresponds to the initial implementation.
Default is 'power'.
NB: results are the same for real-valued characteristic impedances.
Notes
------
The impedance renormalization simply calls ::
z2s(s2z(s, z0=z_old), z0=z_new)
See references [1]_ and [2]_ for the theoretical background.
See Also
--------
Network.renormalize : method of Network to renormalize s
fix_z0_shape
s2z
z2s
References
-------------
.. [1] R. B. Marks and D. F. Williams, "A general waveguide circuit theory," Journal of Research of the National Institute of Standards and Technology, vol. 97, no. 5, pp. 533-561, 1992.
.. [2] Anritsu Application Note: Arbitrary Impedance, https://web.archive.org/web/20200111134414/https://archive.eetasia.com/www.eetasia.com/ARTICLES/2002MAY/2002MAY02_AMD_ID_NTES_AN.PDF?SOURCES=DOWNLOAD
Examples
------------
>>> s = zeros(shape=(101,2,2))
>>> renormalize_s(s, 50,25)
'''
if s_def not in S_DEFINITIONS:
raise ValueError("s_def parameter should be one of: " + ", ".join(S_DEFINITIONS))
# that's a heck of a one-liner!
return z2s(s2z(s, z0=z_old, s_def=s_def), z0=z_new, s_def=s_def)
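# Editor's note: a hypothetical sketch (not part of the original module).
# A one-port that is matched in a 50 ohm system (S11 = 0) has an input
# impedance of 50 ohm, so renormalizing to 25 ohm ports gives
# S11 = (50 - 25) / (50 + 25) = 1/3 for real reference impedances.
def _example_renormalize_matched_load():
    s = npy.zeros((3, 1, 1), dtype=complex)
    s_new = renormalize_s(s, z_old=50, z_new=25)
    assert npy.allclose(s_new, 1.0 / 3)
    return s_new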
def fix_z0_shape(z0, nfreqs, nports):
'''
Make a port impedance of correct shape for a given network's matrix
This attempts to broadcast z0 to satisfy
npy.shape(z0) == (nfreqs,nports)
Parameters
--------------
z0 : number, array-like
z0 can be:
* a number (same at all ports and frequencies)
* an array-like of length == number ports.
* an array-like of length == number frequency points.
* the correct shape ==(nfreqs,nports)
nfreqs : int
number of frequency points
nports : int
number of ports
Returns
----------
z0 : array of shape ==(nfreqs,nports)
z0 with the right shape for a nport Network
Examples
----------
For a two-port network with 201 frequency points, possible uses may
be
>>> z0 = rf.fix_z0_shape(50 , 201,2)
>>> z0 = rf.fix_z0_shape([50,25] , 201,2)
>>> z0 = rf.fix_z0_shape(range(201) , 201,2)
'''
if npy.shape(z0) == (nfreqs, nports):
# z0 is of correct shape. Super duper. Return it quickly.
return z0.copy()
elif npy.isscalar(z0):
# z0 is a single number
return npy.array(nfreqs * [nports * [z0]])
elif len(z0) == nports:
# assume z0 is a list of impedances for each port,
# but constant with frequency
return npy.array(nfreqs * [z0])
elif len(z0) == nfreqs:
# assume z0 is a list of impedances for each frequency,
# but constant with respect to ports
return npy.array(nports * [z0]).T
else:
raise IndexError('z0 is not an acceptable shape')
## cascading assistance functions
def inv(s):
'''
Calculates 'inverse' s-parameter matrix, used for de-embedding
This is not literally the inverse of the s-parameter matrix.
Instead, it is defined such that the inverse of the s-matrix cascaded with itself is a unity scattering transfer parameter (T) matrix.
.. math::
inv(s) = t2s({s2t(s)}^{-1})
where :math:`x^{-1}` is the matrix inverse. In words, this
is the inverse of the scattering transfer parameters matrix
transformed into a scattering parameters matrix.
Parameters
-----------
s : :class:`numpy.ndarray` (shape fx2nx2n)
scattering parameter matrix.
Returns
-------
s' : :class:`numpy.ndarray`
inverse scattering parameter matrix.
See Also
---------
t2s : converts scattering transfer parameters to scattering parameters
s2t : converts scattering parameters to scattering transfer parameters
'''
# this idea is from lihan
t = s2t(s)
tinv = npy.linalg.inv(t)
sinv = t2s(tinv)
#for f in range(len(i)):
# i[f, :, :] = npy.linalg.inv(i[f, :, :]) # could also be written as
# # npy.mat(i[f,:,:])**-1 -- Trey
return sinv
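# Editor's note: a hypothetical sketch (not part of the original module).
# De-embedding an ideal thru from itself gives back an ideal thru, since
# the thru's T-matrix is the identity.
def _example_inv_thru():
    thru = npy.array([[[0, 1], [1, 0]]], dtype=complex)
    assert npy.allclose(inv(thru), thru)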
def flip(a):
'''
Invert the ports of a network's s-matrix, 'flipping' it over left and right.
For a 2n-port network with n > 1, the 'second' port-numbering scheme is
assumed, consistent with the ** cascade operator.
::
-|0 n|- 0-|n 0|-n
-|1 n+1|- flip 1-|n+1 1|-n+1
... ... => ... ...
-|n-1 2n-1|- n-1-|2n-1 n-1|-2n-1
Parameters
-----------
a : :class:`numpy.ndarray`
scattering parameter matrix. shape should be should be 2nx2n, or
fx2nx2n
Returns
-------
a' : :class:`numpy.ndarray`
flipped scattering parameter matrix, i.e., the first n ports
interchanged with the last n ports
Note
-----
See renumber
'''
c = a.copy()
n2 = a.shape[-1]
m2 = a.shape[-2]
n = int(n2/2)
if (n2 == m2) and (n2 % 2 == 0):
old = list(range(0,2*n))
new = list(range(n,2*n)) + list(range(0,n))
if(len(a.shape) == 2):
c[new, :] = c[old, :] # renumber rows
c[:, new] = c[:, old] # renumber columns
else:
c[:, new, :] = c[:, old, :] # renumber rows
c[:, :, new] = c[:, :, old] # renumber columns
else:
raise IndexError('matrices should be 2nx2n, or kx2nx2n')
return c
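# Editor's note: a hypothetical sketch (not part of the original module).
# Flipping a two-port exchanges S11 with S22 and S12 with S21.
def _example_flip_two_port():
    s = npy.array([[[0.1, 0.3], [0.4, 0.2]]], dtype=complex)
    flipped = flip(s)
    assert npy.allclose(flipped[0], npy.array([[0.2, 0.4], [0.3, 0.1]]))
    return flipped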
## COMMON CHECKS (raise exceptions)
def check_frequency_equal(ntwkA, ntwkB):
'''
checks if two Networks have same frequency
'''
if not assert_frequency_equal(ntwkA, ntwkB):
raise IndexError('Networks don\'t have matching frequency. See `Network.interpolate`')
def check_z0_equal(ntwkA, ntwkB):
'''
checks if two Networks have same port impedances
'''
# note you should check frequency equal before you call this
if not assert_z0_equal(ntwkA, ntwkB):
raise ValueError('Networks don\'t have matching z0.')
def check_nports_equal(ntwkA, ntwkB):
'''
checks if two Networks have same number of ports
'''
if not assert_nports_equal(ntwkA, ntwkB):
raise ValueError('Networks don\'t have matching number of ports.')
## TESTs (return [usually boolean] values)
def assert_frequency_equal(ntwkA, ntwkB):
'''
Returns True if the two Networks have equal frequency objects.
'''
return (ntwkA.frequency == ntwkB.frequency)
def assert_z0_equal(ntwkA, ntwkB):
'''
Returns True if the two Networks have equal port impedances at all frequencies.
'''
return (ntwkA.z0 == ntwkB.z0).all()
def assert_z0_at_ports_equal(ntwkA, k, ntwkB, l):
'''
Returns True if port k of ntwkA and port l of ntwkB have equal impedances.
'''
return (ntwkA.z0[:, k] == ntwkB.z0[:, l]).all()
def assert_nports_equal(ntwkA, ntwkB):
'''
Returns True if the two Networks have the same number of ports.
'''
return (ntwkA.number_of_ports == ntwkB.number_of_ports)
## Other
# These don't belong here, but I needed them quickly
# this is needed for port impedance mismatches
def impedance_mismatch(z1, z2):
'''
Creates a two-port s-matrix for an impedance mismatch
Parameters
-----------
z1 : number or array-like
complex impedance of port 1
z2 : number or array-like
complex impedance of port 2
Returns
---------
s' : 2-port s-matrix for the impedance mismatch
'''
gamma = zl_2_Gamma0(z1, z2)
result = npy.zeros(shape=(len(gamma), 2, 2), dtype='complex')
result[:, 0, 0] = gamma
result[:, 1, 1] = -gamma
result[:, 1, 0] = (1 + gamma) * npy.sqrt(1.0 * z1 / z2)
result[:, 0, 1] = (1 - gamma) * npy.sqrt(1.0 * z2 / z1)
return result
def two_port_reflect(ntwk1, ntwk2=None):
'''
Generates a reflective two-port network from two one-port networks.
Parameters
----------
ntwk1 : one-port Network object
network seen from port 1
ntwk2 : one-port Network object, or None
network seen from port 2. If None, ntwk1 is used.
Returns
-------
result : Network object
two-port reflective network
Notes
-------
The resultant Network is copied from `ntwk1`, so its various
properties (name, frequency, etc.) are inherited from that Network.
Examples
---------
>>> short, open = rf.Network('short.s1p'), rf.Network('open.s1p')
>>> rf.two_port_reflect(short,open)
'''
result = ntwk1.copy()
if ntwk2 is None:
ntwk2 = ntwk1
s11 = ntwk1.s[:, 0, 0]
s22 = ntwk2.s[:, 0, 0]
s21 = npy.zeros(ntwk1.frequency.npoints, dtype=complex)
result.s = npy.array( \
[[s11, s21], \
[s21, s22]]). \
transpose().reshape(-1, 2, 2)
result.z0 = npy.hstack([ntwk1.z0, ntwk2.z0])
try:
result.name = ntwk1.name + '-' + ntwk2.name
except(TypeError):
pass
return result
def s2s_active(s, a):
'''
Returns active s-parameters for a defined wave excitation a.
The active s-parameter at a port is the reflection coefficient seen at
that port when all the ports are excited simultaneously. It is an
important quantity for active phased array antennas.
Active s-parameters are defined by [#]_:
.. math::
\mathrm{active}(s)_{m} = \sum_{i=1}^N\left( s_{mi} a_i \right) / a_m
where :math:`s` are the scattering parameters and :math:`N` the number of ports
Parameters
----------
s : complex array
scattering parameters (nfreqs, nports, nports)
a : complex array of shape (n_ports)
forward wave complex amplitude (pseudowave formulation [#]_)
Returns
---------
s_act : complex array of shape (n_freqs, n_ports)
active S-parameters for the excitation a
See Also
-----------
s2z_active : active Z-parameters
s2y_active : active Y-parameters
s2vswr_active : active VSWR
References
----------
.. [#] D. M. Pozar, IEEE Trans. Antennas Propag. 42, 1176 (1994).
.. [#] D. Williams, IEEE Microw. Mag. 14, 38 (2013).
'''
# TODO : vectorize the for loop
nfreqs, nports, nports = s.shape
s_act = npy.zeros((nfreqs, nports), dtype='complex')
s[s == 0] = 1e-12  # avoid numerical singularity (note: this modifies the input array in place)
for fidx in xrange(s.shape[0]):
s_act[fidx] = npy.matmul(s[fidx], a) / a
return s_act # shape : (n_freqs, n_ports)
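# Editor's note: a hypothetical sketch (not part of the original module).
# For an ideal thru excited with equal amplitudes at both ports, each
# active reflection coefficient is s_mi * a_i / a_m = 1.
def _example_s2s_active_thru():
    s = npy.array([[[0, 1], [1, 0]]], dtype=complex)
    a = npy.array([1, 1], dtype=complex)
    assert npy.allclose(s2s_active(s, a), 1)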
def s2z_active(s, z0, a):
'''
Returns the active Z-parameters for a defined wave excitation a.
The active Z-parameters are defined by:
.. math::
\mathrm{active}(z)_{m} = z_{0,m} \frac{1 + \mathrm{active}(s)_m}{1 - \mathrm{active}(s)_m}
where :math:`z_{0,m}` is the characteristic impedance and
:math:`\mathrm{active}(s)_m` the active S-parameter of port :math:`m`.
Parameters
----------
s : complex array
scattering parameters (nfreqs, nports, nports)
z0 : complex array-like or number
port impedances.
a : complex array of shape (n_ports)
forward wave complex amplitude
Returns
----------
z_act : complex array of shape (nfreqs, nports)
active Z-parameters for the excitation a
See Also
-----------
s2s_active : active S-parameters
s2y_active : active Y-parameters
s2vswr_active : active VSWR
'''
# TODO : vectorize the for loop
nfreqs, nports, nports = s.shape
z0 = fix_z0_shape(z0, nfreqs, nports)
z_act = npy.zeros((nfreqs, nports), dtype='complex')
s_act = s2s_active(s, a)
for fidx in xrange(s.shape[0]):
z_act[fidx] = z0[fidx] * (1 + s_act[fidx])/(1 - s_act[fidx])
return z_act
def s2y_active(s, z0, a):
'''
Returns the active Y-parameters for a defined wave excitation a.
The active Y-parameters are defined by:
.. math::
\mathrm{active}(y)_{m} = y_{0,m} \frac{1 - \mathrm{active}(s)_m}{1 + \mathrm{active}(s)_m}
where :math:`y_{0,m}` is the characteristic admittance and
:math:`\mathrm{active}(s)_m` the active S-parameter of port :math:`m`.
Parameters
----------
s : complex array
scattering parameters (nfreqs, nports, nports)
z0 : complex array-like or number
port impedances.
a : complex array of shape (n_ports)
forward wave complex amplitude
Returns
----------
y_act : complex array of shape (nfreqs, nports)
active Y-parameters for the excitation a
See Also
-----------
s2s_active : active S-parameters
s2z_active : active Z-parameters
s2vswr_active : active VSWR
'''
nfreqs, nports, nports = s.shape
z0 = fix_z0_shape(z0, nfreqs, nports)
y_act = npy.zeros((nfreqs, nports), dtype='complex')
s_act = s2s_active(s, a)
for fidx in xrange(s.shape[0]):
y_act[fidx] = 1/z0[fidx] * (1 - s_act[fidx])/(1 + s_act[fidx])
return y_act
def s2vswr_active(s, a):
'''
Returns the active VSWR for a defined wave excitation a.
The active VSWR is defined by :
.. math::
\mathrm{active}(vswr)_{m} = \frac{1 + |\mathrm{active}(s)_m|}{1 - |\mathrm{active}(s)_m|}
where :math:`\mathrm{active}(s)_m` the active S-parameter of port :math:`m`.
Parameters
----------
s : complex array
scattering parameters (nfreqs, nports, nports)
a : complex array of shape (n_ports)
forward wave complex amplitude
Returns
----------
vswr_act : complex array of shape (nfreqs, nports)
active VSWR for the excitation a
See Also
-----------
s2s_active : active S-parameters
s2z_active : active Z-parameters
s2y_active : active Y-parameters
'''
nfreqs, nports, nports = s.shape
vswr_act = npy.zeros((nfreqs, nports), dtype='complex')
s_act = s2s_active(s, a)
for fidx in xrange(s.shape[0]):
vswr_act[fidx] = (1 + npy.abs(s_act[fidx]))/(1 - npy.abs(s_act[fidx]))
return vswr_act
|
bsd-3-clause
|
ishanic/scikit-learn
|
sklearn/feature_selection/tests/test_rfe.py
|
209
|
11733
|
"""
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature ellimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variables were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
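def test_formula_equivalence_concrete_example():
    # Editor's sketch (not part of the original test suite): a direct numeric
    # check of the two counting formulas used above for one concrete case,
    # n_features=11, n_features_to_select=3, step=2 -> 5 feature subsets.
    def formula1(n_features, n_features_to_select, step):
        return 1 + ((n_features + step - n_features_to_select - 1) // step)
    def formula2(n_features, n_features_to_select, step):
        return 1 + np.ceil((n_features - n_features_to_select) / float(step))
    assert_equal(formula1(11, 3, 2), 5)
    assert_equal(formula2(11, 3, 2), 5)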
|
bsd-3-clause
|
jreback/pandas
|
pandas/tests/indexes/multi/test_equivalence.py
|
2
|
8873
|
import numpy as np
import pytest
import pandas as pd
from pandas import Index, MultiIndex, Series
import pandas._testing as tm
def test_equals(idx):
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.equals(idx.to_flat_index())
assert idx.equals(idx.to_flat_index().astype("category"))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
same_values = Index(idx, dtype=object)
assert idx.equals(same_values)
assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
assert not idx.equals(Series(idx))
def test_equals_op(idx):
# GH9947, GH10637
index_a = idx
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
series_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
def test_compare_tuple():
# GH#21517
mi = MultiIndex.from_product([[1, 2]] * 2)
all_false = np.array([False, False, False, False])
result = mi == mi[0]
expected = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = mi != mi[0]
tm.assert_numpy_array_equal(result, ~expected)
result = mi < mi[0]
tm.assert_numpy_array_equal(result, all_false)
result = mi <= mi[0]
tm.assert_numpy_array_equal(result, expected)
result = mi > mi[0]
tm.assert_numpy_array_equal(result, ~expected)
result = mi >= mi[0]
tm.assert_numpy_array_equal(result, ~all_false)
def test_compare_tuple_strs():
# GH#34180
mi = MultiIndex.from_tuples([("a", "b"), ("b", "c"), ("c", "a")])
result = mi == ("c", "a")
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = mi == ("c",)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(result, expected)
def test_equals_multi(idx):
assert idx.equals(idx)
assert not idx.equals(idx.values)
assert idx.equals(Index(idx.values))
assert idx.equal_levels(idx)
assert not idx.equals(idx[:-1])
assert not idx.equals(idx[-1])
# different number of levels
index = MultiIndex(
levels=[Index(list(range(4))), Index(list(range(4))), Index(list(range(4)))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
)
index2 = MultiIndex(levels=index.levels[:-1], codes=index.codes[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(list(range(4)))
minor_axis = Index(list(range(2)))
major_codes = np.array([0, 0, 1, 2, 2, 3])
minor_codes = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
assert not idx.equals(index)
assert not idx.equal_levels(index)
# some of the labels are different
major_axis = Index(["foo", "bar", "baz", "qux"])
minor_axis = Index(["one", "two"])
major_codes = np.array([0, 0, 2, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
assert not idx.equals(index)
def test_identical(idx):
mi = idx.copy()
mi2 = idx.copy()
assert mi.identical(mi2)
mi = mi.set_names(["new1", "new2"])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(["new1", "new2"])
assert mi.identical(mi2)
with tm.assert_produces_warning(FutureWarning):
# subclass-specific keywords to pd.Index
mi3 = Index(mi.tolist(), names=mi.names)
msg = r"Unexpected keyword arguments {'names'}"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning):
# subclass-specific keywords to pd.Index
Index(mi.tolist(), names=mi.names, tupleize_cols=False)
mi4 = Index(mi.tolist(), tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_equals_operator(idx):
# GH9785
assert (idx == idx).all()
def test_equals_missing_values():
# make sure take is not using -1
i = MultiIndex.from_tuples([(0, pd.NaT), (0, pd.Timestamp("20130101"))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_equals_missing_values_differently_sorted():
# GH#38439
mi1 = pd.MultiIndex.from_tuples([(81.0, np.nan), (np.nan, np.nan)])
mi2 = pd.MultiIndex.from_tuples([(np.nan, np.nan), (81.0, np.nan)])
assert not mi1.equals(mi2)
mi2 = pd.MultiIndex.from_tuples([(81.0, np.nan), (np.nan, np.nan)])
assert mi1.equals(mi2)
def test_is_():
mi = MultiIndex.from_tuples(zip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert not mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([list(range(10)), list(range(10))])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
with tm.assert_produces_warning(FutureWarning):
mi4.set_levels([list(range(10)), list(range(10))], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
with tm.assert_produces_warning(FutureWarning):
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_is_all_dates(idx):
assert not idx._is_all_dates
def test_is_numeric(idx):
# MultiIndex is never numeric
assert not idx.is_numeric()
def test_multiindex_compare():
# GH 21149
# Ensure comparison operations for MultiIndex with nlevels == 1
# behave consistently with those for MultiIndex with nlevels > 1
midx = MultiIndex.from_product([[0, 1]])
# Equality self-test: MultiIndex object vs self
expected = Series([True, True])
result = Series(midx == midx)
tm.assert_series_equal(result, expected)
# Greater than comparison: MultiIndex object vs self
expected = Series([False, False])
result = Series(midx > midx)
tm.assert_series_equal(result, expected)
|
bsd-3-clause
|
nvoron23/scikit-learn
|
examples/model_selection/grid_search_text_feature_extraction.py
|
253
|
4158
|
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader, or set them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
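# Editor's note: with the uncommented settings above the grid contains
# 3 (max_df) * 2 (ngram_range) * 2 (alpha) * 2 (penalty) = 24 parameter
# combinations, and each combination is fitted once per cross-validation
# fold, so the search cost grows multiplicatively with every extra
# parameter that is uncommented.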
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
|
bsd-3-clause
|
Evpok/krampouezh
|
libkrampouezh/libinterpol.py
|
1
|
4441
|
'''
@author: Loïc Grobol <[email protected]>
Copyright © 2014, Loïc Grobol <[email protected]>
Permission is granted to Do What The Fuck You Want To
with this document.
See the WTF Public License, Version 2 as published by Sam Hocevar
at http://www.wtfpl.net if you need more details.
This provides the maths for the interpolation, and a few
goodies as well, such as a crude graphical output using matplotlib
shamelessly stolen from [scipy's doc][1].
There is also a naive implementation of the cubic interpolation
using the method described in [Piecewise polynomial interpolation][IWA13].
[1]: http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html#scipy.interpolate.interp1d
[IWA13]: http://www.opengamma.com/blog/piecewise-polynomial-interpolation
'''
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.sparse.linalg import spsolve
def cubic_interpol(points: '((x₀,y₀),(x₁,y₁),…)'):
'''Return the piecewise cubic interpolation of `points`.'''
x, y = zip(*points)
return interp1d(x, y, kind='cubic')
def cubic_coefs(points: '((x₀,y₀),(x₁,y₁),…)') -> '((a₀¹,a₁¹,a₂¹,a₃¹),(a₀²,a₁²,a₂²,a₃³),…)':
'''Return the coefficients of the piecewise natural cubic interpolation of `points`.
Based on [Piecewise polynomial interpolation][IWA13].
[IWA13]: http://www.opengamma.com/blog/piecewise-polynomial-interpolation'''
x, y = map(np.array,zip(*sorted(points)))
h = x[1:]-x[:-1]
s = (y[1:]-y[:-1])/h
sub = np.concatenate((h[:-1],[0]))
super = np.concatenate(([0],h[1:]))
main = np.concatenate(([1],2*(h[:-1]+h[1:]),[1]))
A = sp.sparse.diags((sub,main,super),(-1,0,1),format='csr')
b = 6*np.concatenate(([0],s[1:]-s[:-1],[0]))
m = spsolve(A,b)
a0 = y
a1 = s - m[:-1]*h/2 - h*(m[1:]-m[:-1])/6
a2 = m/2
a3 = (m[1:]-m[:-1])/(6*h)
return zip(a0,a1,a2,a3)
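def _example_cubic_coefs_collinear():
    '''Editor's sketch (hypothetical helper, not in the original module):
    collinear points produce purely linear pieces, i.e. zero quadratic and
    cubic coefficients and a slope of 1.'''
    coefs = list(cubic_coefs(((0, 0), (1, 1), (2, 2))))
    assert all(np.isclose(a1, 1) and np.isclose(a2, 0) and np.isclose(a3, 0)
               for a0, a1, a2, a3 in coefs)
    return coefs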
def hermite_coefs(points: "((x₀,y₀,y₀'),(x₁,y₁,y₁'),…)") -> '((a₀¹,a₁¹,a₂¹,a₃¹),(a₀²,a₁²,a₂²,a₃³),…)':
'''Return the coefficients of the piecewise cubic Hermite interpolation of `points`.
See [the corresponding (and well-written) Wikipedia entry][WIKHE]. From it we can
infer that the interpolating polynomial on $[xᵢ,xᵢ₊₁]$ is
$$P∘\frac{X-xᵢ}{xᵢ₊₁-xᵢ}$$
where $P$ is
$$P = (2yᵢ−2yᵢ₊₁+(xᵢ₊₁−xᵢ)yᵢ'+(xᵢ₊₁−xᵢ)yᵢ₊₁')X³+(−3yᵢ+3yᵢ₊₁-2(xᵢ₊₁−xᵢ)yᵢ'-(xᵢ₊₁−xᵢ)yᵢ₊₁')X²+(xᵢ₊₁−xᵢ)yᵢ'X+yᵢ$$
So the coefficients of $P$ (in decreasing powers) are the components of
$$
[[ 2 -2 1 1] [[yᵢ]
[-3 3 -2 -1] × [yᵢ₊₁]
[ 0 0 1 0] [yᵢ']
[ 1 0 0 0]] [yᵢ₊₁']]
$$
That we vectorise into $H×P$ where $H$ is the above Hermite matrix and $P$ is simply
$$
[[y₀ y₁ …]
[y₁ y₂ …]
[y₀' y₁' …]
[y₁' y₂' …]]
$$
After which we just renormalise the coefficients by multiplying the first line of $HP$ by $(\frac{X-xᵢ}{xᵢ₊₁-xᵢ})³$,
the second line by $(\frac{X-xᵢ}{xᵢ₊₁-xᵢ})²$… To use them, don't forget to apply the shift, too. E.g. if you are
interpolating on $[5,7]$, the coefficients returned by this function are those of the polynomial in $X-5$.
[WIKHE]: https://en.wikipedia.org/wiki/Cubic_Hermite_spline#Interpolation_on_an_arbitrary_interval'''
x, y, t = map(np.array,zip(*sorted(points)))
H = np.array(((2,-2,1,1),(-3,3,-2,-1),(0,0,1,0),(1,0,0,0)))
Δ = x[1:]-x[:-1]
P = np.array((y[:-1],y[1:],Δ*t[:-1],Δ*t[1:]))
coefs = H.dot(P)
normaliser = np.array((1/Δ**3,1/Δ**2,1/Δ,1/Δ**0))
normal_coefs = coefs * normaliser
return (l[::-1] for l in normal_coefs.transpose().tolist())
def plot_interpol(points: '((x₀,y₀),(x₁,y₁),…)', interpol=cubic_interpol, samples=100):
'''Display a crude graphical output of the cubic interpolation using matplotlib.'''
x, y = zip(*points)
f = interpol(points)
xnew = np.linspace(min(x), max(x), samples)
plt.plot(x,y,'o',xnew,f(xnew),'-')
plt.legend(['data', 'cubic'], loc='best')
plt.show()
if __name__=='__main__':
print(list(hermite_coefs(((0,0,1),(5,0,0),(7,1,1)))))
|
cc0-1.0
|
lekshmideepu/nest-simulator
|
pynest/examples/csa_example.py
|
14
|
4565
|
# -*- coding: utf-8 -*-
#
# csa_example.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Using CSA for connection setup
------------------------------
This example sets up a simple network in NEST using the Connection Set
Algebra (CSA) instead of using the built-in connection routines.
Using the CSA requires NEST to be compiled with support for
libneurosim. For details, see [1]_.
See Also
~~~~~~~~
:doc:`csa_spatial_example`
References
~~~~~~~~~~
.. [1] Djurfeldt M, Davison AP and Eppler JM (2014). Efficient generation of
connectivity in neuronal networks from simulator-independent
descriptions, Front. Neuroinform.
http://dx.doi.org/10.3389/fninf.2014.00043
"""
###############################################################################
# First, we import all necessary modules for simulation and plotting.
import nest
from nest import voltage_trace
from nest import visualization
import matplotlib.pyplot as plt
###############################################################################
# Next, we check for the availability of the CSA Python module. If it does
# not import, we exit with an error message.
try:
import csa
haveCSA = True
except ImportError:
print("This example requires CSA to be installed in order to run.\n" +
"Please make sure you compiled NEST using\n" +
" -Dwith-libneurosim=[OFF|ON|</path/to/libneurosim>]\n" +
"and CSA and libneurosim are available.")
import sys
sys.exit(1)
###############################################################################
# To set up the connectivity, we create a ``random`` connection set with a
# probability of 0.1 and two associated values (10000.0 and 1.0) used as
# weight and delay, respectively.
cg = csa.cset(csa.random(0.1), 10000.0, 1.0)
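# Editor's note: with 16 x 16 = 256 candidate source/target pairs and a
# connection probability of 0.1, roughly 26 connections are expected on
# average; the exact count varies from run to run.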
###############################################################################
# Using the ``Create`` command from PyNEST, we create the neurons of the pre-
# and postsynaptic populations, each of which containing 16 neurons.
pre = nest.Create("iaf_psc_alpha", 16)
post = nest.Create("iaf_psc_alpha", 16)
###############################################################################
# We can now connect the populations using the ``Connect`` function
# with the ``conngen`` rule. It takes the IDs of pre- and postsynaptic
# neurons (``pre`` and ``post``), the connection set (``cg``) and a
# dictionary that maps the parameters weight and delay to positions in
# the value set associated with the connection set (``params_map``).
params_map = {"weight": 0, "delay": 1}
connspec = {"rule": "conngen", "cg": cg, "params_map": params_map}
nest.Connect(pre, post, connspec)
###############################################################################
# To stimulate the network, we create a ``poisson_generator`` and set it up to
# fire with a rate of 100000 spikes per second. It is connected to the
# neurons of the pre-synaptic population.
pg = nest.Create("poisson_generator", params={"rate": 100000.0})
nest.Connect(pg, pre, "all_to_all")
###############################################################################
# To measure and record the membrane potentials of the neurons, we create a
# ``voltmeter`` and connect it to all postsynaptic nodes.
vm = nest.Create("voltmeter")
nest.Connect(vm, post, "all_to_all")
###############################################################################
# We save the whole connection graph of the network as a PNG image using the
# ``plot_network`` function of the ``visualization`` submodule of PyNEST.
allnodes = pg + pre + post + vm
visualization.plot_network(allnodes, "csa_example_graph.png")
###############################################################################
# Finally, we simulate the network for 50 ms. The voltage traces of the
# postsynaptic nodes are plotted.
nest.Simulate(50.0)
voltage_trace.from_device(vm)
plt.show()
|
gpl-2.0
|
neuropil/boltons
|
boltons/tableutils.py
|
4
|
18624
|
# -*- coding: utf-8 -*-
"""If there is one recurring theme in ``boltons``, it is that Python
has excellent datastructures that constitute a good foundation for
most quick manipulations, as well as building applications. However,
Python usage has grown much faster than builtin data structure
power. Python has a growing need for more advanced general-purpose
data structures which behave intuitively.
The :class:`Table` class is one example. When handed one- or
two-dimensional data, it can provide useful, if basic, text and HTML
renditions of small to medium sized data. It also heuristically
handles recursive data of various formats (lists, dicts, namedtuples,
objects).
For more advanced :class:`Table`-style manipulation check out the
`pandas`_ DataFrame.
.. _pandas: http://pandas.pydata.org/
"""
from __future__ import print_function
import cgi
import types
from itertools import islice
from collections import Sequence, Mapping, MutableSequence
try:
string_types, integer_types = (str, unicode), (int, long)
except:
# Python 3 compat
unicode = str
string_types, integer_types = (str, bytes), (int,)
try:
from typeutils import make_sentinel
_MISSING = make_sentinel(var_name='_MISSING')
except ImportError:
_MISSING = object()
"""
Some idle feature thoughts:
* shift around column order without rearranging data
* gotta make it so you can add additional items, not just initialize with
* maybe a shortcut would be to allow adding of Tables to other Tables
* what's the perf of preallocating lists and overwriting items versus
starting from empty?
* is it possible to effectively tell the difference between when a
Table is from_data()'d with a single row (list) or with a list of lists?
* CSS: white-space pre-line or pre-wrap maybe?
* Would be nice to support different backends (currently uses lists
exclusively). Sometimes large datasets come in list-of-dicts and
list-of-tuples format and it's desirable to cut down processing overhead.
TODO: make iterable on rows?
"""
__all__ = ['Table']
def to_text(obj, maxlen=None):
try:
text = unicode(obj)
except:
try:
text = unicode(repr(obj))
except:
text = unicode(object.__repr__(obj))
if maxlen and len(text) > maxlen:
text = text[:maxlen - 3] + '...'
# TODO: inverse of ljust/rjust/center
return text
def escape_html(obj, maxlen=None):
text = to_text(obj, maxlen=maxlen)
return cgi.escape(text, quote=True)
_DNR = set((type(None), bool, complex, float,
type(NotImplemented), slice,
types.FunctionType, types.MethodType, types.BuiltinFunctionType,
types.GeneratorType) + string_types + integer_types)
class UnsupportedData(TypeError):
pass
class InputType(object):
def __init__(self, *a, **kw):
pass
def get_entry_seq(self, data_seq, headers):
return [self.get_entry(entry, headers) for entry in data_seq]
class DictInputType(InputType):
def check_type(self, obj):
return isinstance(obj, Mapping)
def guess_headers(self, obj):
return obj.keys()
def get_entry(self, obj, headers):
return [obj.get(h) for h in headers]
def get_entry_seq(self, obj, headers):
return [[ci.get(h) for h in headers] for ci in obj]
class ObjectInputType(InputType):
def check_type(self, obj):
return type(obj) not in _DNR and hasattr(obj, '__class__')
def guess_headers(self, obj):
headers = []
for attr in dir(obj):
# an object's __dict__ could technically have non-string keys
try:
val = getattr(obj, attr)
except:
# seen on greenlet: `run` shows in dir() but raises
# AttributeError. Also properties misbehave.
continue
if callable(val):
continue
headers.append(attr)
return headers
def get_entry(self, obj, headers):
values = []
for h in headers:
try:
values.append(getattr(obj, h))
except:
values.append(None)
return values
# might be better to hardcode list support since it's so close to the
# core or might be better to make this the copy-style from_* importer
# and have the non-copy style be hardcoded in __init__
class ListInputType(InputType):
def check_type(self, obj):
return isinstance(obj, MutableSequence)
def guess_headers(self, obj):
return None
def get_entry(self, obj, headers):
return obj
def get_entry_seq(self, obj_seq, headers):
return obj_seq
class TupleInputType(InputType):
def check_type(self, obj):
return isinstance(obj, tuple)
def guess_headers(self, obj):
return None
def get_entry(self, obj, headers):
return list(obj)
def get_entry_seq(self, obj_seq, headers):
return [list(t) for t in obj_seq]
class NamedTupleInputType(InputType):
def check_type(self, obj):
return hasattr(obj, '_fields') and isinstance(obj, tuple)
def guess_headers(self, obj):
return list(obj._fields)
def get_entry(self, obj, headers):
return [getattr(obj, h, None) for h in headers]
def get_entry_seq(self, obj_seq, headers):
return [[getattr(obj, h, None) for h in headers] for obj in obj_seq]
class Table(object):
"""
This Table class is meant to be simple, low-overhead, and extensible. Its
most common use would be for translation between in-memory data
structures and serialization formats, such as HTML and console-ready text.
As such, it stores data in list-of-lists format, and *does not* copy
lists passed in. It also reserves the right to modify those lists in a
"filling" process, whereby short lists are extended to the width of
the table (usually determined by number of headers). This greatly
reduces overhead and processing/validation that would have to occur
otherwise.
General description of headers behavior:
Headers describe the columns but are not part of the data. However,
if the *headers* argument is omitted, Table tries to infer header
names from the data. It is possible to have a table with no headers:
just pass in ``headers=None``.
Supported inputs:
* :class:`list` of :class:`list` objects
* :class:`dict` (list/single)
* :class:`object` (list/single)
* :class:`collections.namedtuple` (list/single)
* TODO: DB API cursor?
* TODO: json
Supported outputs:
* HTML
* Pretty text (also usable as GF Markdown)
* TODO: CSV
* TODO: json
* TODO: json lines
To minimize resident size, the Table data is stored as a list of lists.
"""
# order definitely matters here
_input_types = [DictInputType(), ListInputType(),
NamedTupleInputType(), TupleInputType(),
ObjectInputType()]
_html_tr, _html_tr_close = '<tr>', '</tr>'
_html_th, _html_th_close = '<th>', '</th>'
_html_td, _html_td_close = '<td>', '</td>'
# _html_thead, _html_thead_close = '<thead>', '</thead>'
# _html_tfoot, _html_tfoot_close = '<tfoot>', '</tfoot>'
_html_table_tag, _html_table_tag_close = '<table>', '</table>'
def __init__(self, data=None, headers=_MISSING):
if headers is _MISSING:
headers = []
if data:
headers, data = list(data[0]), islice(data, 1, None)
self.headers = headers or []
self._data = []
self._width = 0
self.extend(data)
def extend(self, data):
"""
Append the given data to the end of the Table.
"""
if not data:
return
self._data.extend(data)
self._set_width()
self._fill()
def _set_width(self, reset=False):
if reset:
self._width = 0
if self._width:
return
if self.headers:
self._width = len(self.headers)
return
self._width = max([len(d) for d in self._data])
def _fill(self):
width, filler = self._width, [None]
if not width:
return
for d in self._data:
rem = width - len(d)
if rem > 0:
d.extend(filler * rem)
return
@classmethod
def from_dict(cls, data, headers=_MISSING, max_depth=1):
"""Create a Table from a :class:`dict`. Operates the same as
:meth:`from_data`, but forces interpretation of the data as a
Mapping.
"""
return cls.from_data(data=data, headers=headers,
max_depth=max_depth, _data_type=DictInputType())
@classmethod
def from_list(cls, data, headers=_MISSING, max_depth=1):
"""Create a Table from a :class:`list`. Operates the same as
:meth:`from_data`, but forces the interpretation of the data
as a Sequence.
"""
return cls.from_data(data=data, headers=headers,
max_depth=max_depth, _data_type=ListInputType())
@classmethod
def from_object(cls, data, headers=_MISSING, max_depth=1):
"""Create a Table from an :class:`object`. Operates the same as
:meth:`from_data`, but forces the interpretation of the data
as an object. May be useful for some :class:`dict` and
:class:`list` subtypes.
"""
return cls.from_data(data=data, headers=headers,
max_depth=max_depth, _data_type=ObjectInputType())
@classmethod
def from_data(cls, data, headers=_MISSING, max_depth=1, _data_type=None):
"""Create a Table from any supported data, heuristically
selecting how to represent the data in Table format.
Args:
data (object): Any object or iterable with data to be
imported to the Table.
headers (iterable): An iterable of headers to be matched
to the data. If not explicitly passed, headers will be
guessed for certain datatypes.
max_depth (int): The level to which nested Tables should
be created (default: 1).
_data_type (InputType subclass): For advanced use cases,
do not guess the type of the input data, use this data
type instead.
"""
# TODO: seen/cycle detection/reuse ?
# maxdepth follows the same behavior as find command
# i.e., it doesn't work if max_depth=0 is passed in
if max_depth < 1:
return cls(headers=headers) # return data instead?
is_seq = isinstance(data, Sequence)
if is_seq:
if not data:
return cls(headers=headers)
to_check = data[0]
if not _data_type:
for it in cls._input_types:
if it.check_type(to_check):
_data_type = it
break
else:
# not particularly happy about this rewind-y approach
is_seq = False
to_check = data
else:
if type(data) in _DNR:
# hmm, got scalar data.
# raise an exception or make an exception, nahmsayn?
return Table([[data]], headers=headers)
to_check = data
if not _data_type:
for it in cls._input_types:
if it.check_type(to_check):
_data_type = it
break
else:
raise UnsupportedData('unsupported data type %r'
% type(data))
if headers is _MISSING:
headers = _data_type.guess_headers(to_check)
if is_seq:
entries = _data_type.get_entry_seq(data, headers)
else:
entries = [_data_type.get_entry(data, headers)]
if max_depth > 1:
new_max_depth = max_depth - 1
for i, entry in enumerate(entries):
for j, cell in enumerate(entry):
if type(cell) in _DNR:
# optimization to avoid function overhead
continue
try:
entries[i][j] = cls.from_data(cell,
max_depth=new_max_depth)
except UnsupportedData:
continue
return cls(entries, headers=headers)
def __len__(self):
return len(self._data)
def __getitem__(self, idx):
return self._data[idx]
def __repr__(self):
cn = self.__class__.__name__
if self.headers:
return '%s(headers=%r, data=%r)' % (cn, self.headers, self._data)
else:
return '%s(%r)' % (cn, self._data)
def to_html(self, orientation=None, wrapped=True,
with_headers=True, with_newlines=True, max_depth=1):
"""Render this Table to HTML. Configure the structure of Table
HTML by subclassing and overriding ``_html_*`` class
attributes.
Args:
orientation (str): one of 'auto', 'horizontal', or
'vertical' (or the first letter of any of
those). Default 'auto'.
wrapped (bool): whether or not to include the wrapping
'<table></table>' tags. Default ``True``, set to
``False`` if appending multiple Table outputs or an
otherwise customized HTML wrapping tag is needed.
with_newlines (bool): Set to ``True`` if output should
include added newlines to make the HTML more
                readable. Default ``True``.
max_depth (int): Indicate how deeply to nest HTML tables
before simply reverting to :func:`repr`-ing the nested
data.
Returns:
A text string of the HTML of the rendered table.
"""
lines = []
headers = []
if with_headers and self.headers:
headers.extend(self.headers)
headers.extend([None] * (self._width - len(self.headers)))
if wrapped:
lines.append(self._html_table_tag)
orientation = orientation or 'auto'
ol = orientation[0].lower()
if ol == 'a':
ol = 'h' if len(self) > 1 else 'v'
if ol == 'h':
self._add_horizontal_html_lines(lines, headers=headers,
max_depth=max_depth)
elif ol == 'v':
self._add_vertical_html_lines(lines, headers=headers,
max_depth=max_depth)
else:
raise ValueError("expected one of 'auto', 'vertical', or"
" 'horizontal', not %r" % orientation)
if wrapped:
lines.append(self._html_table_tag_close)
sep = '\n' if with_newlines else ''
return sep.join(lines)
def _add_horizontal_html_lines(self, lines, headers, max_depth):
esc = escape_html
new_depth = max_depth - 1 if max_depth > 1 else max_depth
if headers:
_thth = self._html_th_close + self._html_th
lines.append(self._html_tr + self._html_th +
_thth.join([esc(h) for h in headers]) +
self._html_th_close + self._html_tr_close)
trtd, _tdtd, _td_tr = (self._html_tr + self._html_td,
self._html_td_close + self._html_td,
self._html_td_close + self._html_tr_close)
for row in self._data:
if max_depth > 1:
_fill_parts = []
for cell in row:
if isinstance(cell, Table):
_fill_parts.append(cell.to_html(max_depth=new_depth))
else:
_fill_parts.append(esc(cell))
else:
_fill_parts = [esc(c) for c in row]
lines.append(''.join([trtd, _tdtd.join(_fill_parts), _td_tr]))
def _add_vertical_html_lines(self, lines, headers, max_depth):
esc = escape_html
new_depth = max_depth - 1 if max_depth > 1 else max_depth
tr, th, _th = self._html_tr, self._html_th, self._html_th_close
td, _tdtd = self._html_td, self._html_td_close + self._html_td
_td_tr = self._html_td_close + self._html_tr_close
for i in range(self._width):
line_parts = [tr]
if headers:
line_parts.extend([th, esc(headers[i]), _th])
if max_depth > 1:
_fill_parts = []
for row in self._data:
cell = row[i]
if isinstance(cell, Table):
_fill_parts.append(cell.to_html(max_depth=new_depth))
else:
_fill_parts.append(esc(row[i]))
else:
_fill_parts = [esc(row[i]) for row in self._data]
line_parts.extend([td, _tdtd.join(_fill_parts), _td_tr])
lines.append(''.join(line_parts))
def to_text(self, with_headers=True, maxlen=None):
"""Get the Table's textual representation. Only works well
for Tables with non-recursive data.
Args:
with_headers (bool): Whether to include a header row at the top.
maxlen (int): Max length of data in each cell.
"""
# TODO: verify this works for markdown
lines = []
widths = []
headers = list(self.headers)
text_data = [[to_text(cell, maxlen=maxlen) for cell in row]
for row in self._data]
for idx in range(self._width):
            cur_widths = [len(cur[idx]) for cur in text_data]
if with_headers:
cur_widths.append(len(to_text(headers[idx], maxlen=maxlen)))
widths.append(max(cur_widths))
if with_headers:
lines.append(' | '.join([h.center(widths[i])
for i, h in enumerate(headers)]))
lines.append('-+-'.join(['-' * w for w in widths]))
for row in text_data:
lines.append(' | '.join([cell.center(widths[j])
for j, cell in enumerate(row)]))
return '\n'.join(lines)
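# --- Editor's usage sketch (not part of the original module). It exercises
# only the public API defined above: Table.from_data() guesses headers from a
# list of dicts, to_text() renders a padded text grid, and to_html() renders
# an HTML table (a horizontal layout is chosen automatically for multiple
# rows). The row values here are arbitrary placeholder data.
if __name__ == '__main__':
    _demo_rows = [{'id': '1', 'name': 'alice'},
                  {'id': '2', 'name': 'bob'}]
    _demo_table = Table.from_data(_demo_rows)
    print(_demo_table.to_text())
    print(_demo_table.to_html())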
|
bsd-3-clause
|
anne-urai/RT_RDK
|
graphicalModels/daft.py
|
2
|
19735
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["PGM", "Node", "Edge", "Plate"]
__version__ = "0.0.4"
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.patches import FancyArrow
from matplotlib.patches import Rectangle as Rectangle
import numpy as np
class PGM(object):
"""
The base object for building a graphical model representation.
:param shape:
The number of rows and columns in the grid.
:param origin:
The coordinates of the bottom left corner of the plot.
:param grid_unit: (optional)
The size of the grid spacing measured in centimeters.
:param node_unit: (optional)
The base unit for the node size. This is a number in centimeters that
sets the default diameter of the nodes.
:param observed_style: (optional)
How should the "observed" nodes be indicated? This must be one of:
``"shaded"``, ``"inner"`` or ``"outer"`` where ``inner`` and
``outer`` nodes are shown as double circles with the second circle
plotted inside or outside of the standard one, respectively.
:param node_ec: (optional)
The default edge color for the nodes.
:param directed: (optional)
Should the edges be directed by default?
:param aspect: (optional)
The default aspect ratio for the nodes.
:param label_params: (optional)
Default node label parameters.
"""
def __init__(self, shape, origin=[0, 0],
grid_unit=2, node_unit=1,
observed_style="shaded",
line_width=1, node_ec="k",
directed=True, aspect=1.0,
label_params={}):
self._nodes = {}
self._edges = []
self._plates = []
self._ctx = _rendering_context(shape=shape, origin=origin,
grid_unit=grid_unit,
node_unit=node_unit,
observed_style=observed_style,
line_width=line_width,
node_ec=node_ec, directed=directed,
aspect=aspect,
label_params=label_params)
def add_node(self, node):
"""
Add a :class:`Node` to the model.
:param node:
The :class:`Node` instance to add.
"""
self._nodes[node.name] = node
return node
def add_edge(self, name1, name2, directed=None,
xoffset=0, yoffset=0, **kwargs):
"""
Construct an :class:`Edge` between two named :class:`Node` objects.
:param name1:
The name identifying the first node.
:param name2:
The name identifying the second node. If the edge is directed,
the arrow will point to this node.
:param directed: (optional)
Should this be a directed edge?
"""
if directed is None:
directed = self._ctx.directed
e = Edge(self._nodes[name1], self._nodes[name2], directed=directed,
xoffset=xoffset, yoffset=yoffset, plot_params=kwargs)
self._edges.append(e)
return e
def add_plate(self, plate):
"""
Add a :class:`Plate` object to the model.
"""
self._plates.append(plate)
return None
def render(self):
"""
Render the :class:`Plate`, :class:`Edge` and :class:`Node` objects in
the model. This will create a new figure with the correct dimensions
and plot the model in this area.
"""
self.figure = self._ctx.figure()
self.ax = self._ctx.ax()
for plate in self._plates:
plate.render(self._ctx)
for edge in self._edges:
edge.render(self._ctx)
for name in self._nodes:
self._nodes[name].render(self._ctx)
return self.ax
class Node(object):
"""
The representation of a random variable in a :class:`PGM`.
:param name:
The plain-text identifier for the node.
:param content:
The display form of the variable.
:param x:
The x-coordinate of the node in *model units*.
:param y:
The y-coordinate of the node.
:param scale: (optional)
The diameter (or height) of the node measured in multiples of
``node_unit`` as defined by the :class:`PGM` object.
:param aspect: (optional)
The aspect ratio width/height for elliptical nodes; default 1.
:param observed: (optional)
Should this be a conditioned variable?
:param fixed: (optional)
Should this be a fixed (not permitted to vary) variable?
If `True`, modifies or over-rides ``diameter``, ``offset``,
``facecolor``, and a few other ``plot_params`` settings.
This setting conflicts with ``observed``.
:param offset: (optional)
The ``(dx, dy)`` offset of the label (in points) from the default
centered position.
:param plot_params: (optional)
A dictionary of parameters to pass to the
:class:`matplotlib.patches.Ellipse` constructor.
"""
def __init__(self, name, content, x, y, scale=1, aspect=None,
observed=False, fixed=False,
offset=[0, 0], plot_params={}, label_params=None):
# Node style.
assert not (observed and fixed), \
"A node cannot be both 'observed' and 'fixed'."
self.observed = observed
self.fixed = fixed
# Metadata.
self.name = name
self.content = content
# Coordinates and dimensions.
self.x, self.y = x, y
self.scale = scale
if self.fixed:
self.scale /= 6.0
self.aspect = aspect
# Display parameters.
self.plot_params = dict(plot_params)
# Text parameters.
self.offset = list(offset)
if label_params is not None:
self.label_params = dict(label_params)
else:
self.label_params = None
def render(self, ctx):
"""
Render the node.
:param ctx:
The :class:`_rendering_context` object.
"""
# Get the axes and default plotting parameters from the rendering
# context.
ax = ctx.ax()
# Resolve the plotting parameters.
p = dict(self.plot_params)
p["lw"] = _pop_multiple(p, ctx.line_width, "lw", "linewidth")
p["ec"] = p["edgecolor"] = _pop_multiple(p, ctx.node_ec,
"ec", "edgecolor")
p["fc"] = _pop_multiple(p, "none", "fc", "facecolor")
fc = p["fc"]
p["alpha"] = p.get("alpha", 1)
# And the label parameters.
if self.label_params is None:
l = dict(ctx.label_params)
else:
l = dict(self.label_params)
l["va"] = _pop_multiple(l, "center", "va", "verticalalignment")
l["ha"] = _pop_multiple(l, "center", "ha", "horizontalalignment")
# Deal with ``fixed`` nodes.
scale = self.scale
if self.fixed:
# MAGIC: These magic numbers should depend on the grid/node units.
self.offset[1] += 6
l["va"] = "baseline"
l.pop("verticalalignment", None)
l.pop("ma", None)
if p["fc"] == "none":
p["fc"] = "k"
diameter = ctx.node_unit * scale
if self.aspect is not None:
aspect = self.aspect
else:
aspect = ctx.aspect
# Set up an observed node. Note the fc INSANITY.
if self.observed:
# Update the plotting parameters depending on the style of
# observed node.
h = float(diameter)
w = aspect * float(diameter)
if ctx.observed_style == "shaded":
p["fc"] = "0.7"
elif ctx.observed_style == "outer":
h = diameter + 0.1 * diameter
w = aspect * diameter + 0.1 * diameter
elif ctx.observed_style == "inner":
h = diameter - 0.1 * diameter
w = aspect * diameter - 0.1 * diameter
p["fc"] = fc
# Draw the background ellipse.
bg = Ellipse(xy=ctx.convert(self.x, self.y),
width=w, height=h, **p)
ax.add_artist(bg)
# Reset the face color.
p["fc"] = fc
# Draw the foreground ellipse.
if ctx.observed_style == "inner" and not self.fixed:
p["fc"] = "none"
el = Ellipse(xy=ctx.convert(self.x, self.y),
width=diameter * aspect, height=diameter, **p)
ax.add_artist(el)
# Reset the face color.
p["fc"] = fc
# Annotate the node.
ax.annotate(self.content, ctx.convert(self.x, self.y),
xycoords="data",
xytext=self.offset, textcoords="offset points",
**l)
return el
class Edge(object):
"""
An edge between two :class:`Node` objects.
:param node1:
The first :class:`Node`.
:param node2:
The second :class:`Node`. The arrow will point towards this node.
:param directed: (optional)
Should the edge be directed from ``node1`` to ``node2``? In other
words: should it have an arrow?
:param plot_params: (optional)
A dictionary of parameters to pass to the plotting command when
rendering.
"""
def __init__(self, node1, node2, directed=True,
xoffset=0, yoffset=0, plot_params={}):
self.node1 = node1
self.node2 = node2
self.directed = directed
self.plot_params = dict(plot_params)
self.xoffset = xoffset
self.yoffset = yoffset
def _get_coords(self, ctx):
"""
Get the coordinates of the line.
:param conv:
A callable coordinate conversion.
:returns:
* ``x0``, ``y0``: the coordinates of the start of the line.
* ``dx0``, ``dy0``: the displacement vector.
"""
# Scale the coordinates appropriately.
x1, y1 = ctx.convert(self.node1.x, self.node1.y)
x2, y2 = ctx.convert(self.node2.x, self.node2.y)
# Aspect ratios.
a1, a2 = self.node1.aspect, self.node2.aspect
if a1 is None:
a1 = ctx.aspect
if a2 is None:
a2 = ctx.aspect
# Compute the distances.
dx, dy = x2 - x1, y2 - y1
dist1 = np.sqrt(dy * dy + dx * dx / float(a1 ** 2))
dist2 = np.sqrt(dy * dy + dx * dx / float(a2 ** 2))
# Compute the fractional effect of the radii of the nodes.
alpha1 = 0.5 * ctx.node_unit * self.node1.scale / dist1
alpha2 = 0.5 * ctx.node_unit * self.node2.scale / dist2
# Get the coordinates of the starting position.
x0, y0 = x1 + alpha1 * dx, y1 + alpha1 * dy
# Get the width and height of the line.
dx0 = dx * (1. - alpha1 - alpha2)
dy0 = dy * (1. - alpha1 - alpha2)
return x0, y0, dx0, dy0
def render(self, ctx):
"""
Render the edge in the given axes.
:param ctx:
The :class:`_rendering_context` object.
"""
ax = ctx.ax()
p = self.plot_params
p["linewidth"] = _pop_multiple(p, ctx.line_width,
"lw", "linewidth")
# Add edge annotation.
if "label" in self.plot_params:
x, y, dx, dy = self._get_coords(ctx)
ax.annotate(self.plot_params["label"],
[x + 0.5 * dx + self.xoffset,
y + 0.5 * dy + self.yoffset],
xycoords="data",
xytext=[0, 3], textcoords="offset points",
ha="center", va="center")
if self.directed:
p["ec"] = _pop_multiple(p, "k", "ec", "edgecolor")
p["fc"] = _pop_multiple(p, "k", "fc", "facecolor")
p["head_length"] = p.get("head_length", 0.25)
p["head_width"] = p.get("head_width", 0.1)
# Build an arrow.
ar = FancyArrow(*self._get_coords(ctx), width=0,
length_includes_head=True,
**p)
# Add the arrow to the axes.
ax.add_artist(ar)
return ar
else:
p["color"] = p.get("color", "k")
# Get the right coordinates.
x, y, dx, dy = self._get_coords(ctx)
# Plot the line.
line = ax.plot([x, x + dx], [y, y + dy], **p)
return line
class Plate(object):
"""
A plate to encapsulate repeated independent processes in the model.
:param rect:
The rectangle describing the plate bounds in model coordinates.
:param label: (optional)
A string to annotate the plate.
:param label_offset: (optional)
The x and y offsets of the label text measured in points.
:param shift: (optional)
The vertical "shift" of the plate measured in model units. This will
move the bottom of the panel by ``shift`` units.
:param position: (optional)
One of ``"bottom left"`` or ``"bottom right"``.
:param rect_params: (optional)
A dictionary of parameters to pass to the
:class:`matplotlib.patches.Rectangle` constructor.
"""
def __init__(self, rect, label=None, label_offset=[5, 5], shift=0,
position="bottom left", rect_params={}, bbox={}):
self.rect = rect
self.label = label
self.label_offset = label_offset
self.shift = shift
self.rect_params = dict(rect_params)
self.bbox = dict(bbox)
self.position = position
def render(self, ctx):
"""
Render the plate in the given axes.
:param ctx:
The :class:`_rendering_context` object.
"""
ax = ctx.ax()
s = np.array([0, self.shift])
r = np.atleast_1d(self.rect)
bl = ctx.convert(*(r[:2] + s))
tr = ctx.convert(*(r[:2] + r[2:]))
r = np.concatenate([bl, tr - bl])
p = self.rect_params
p["ec"] = _pop_multiple(p, "k", "ec", "edgecolor")
p["fc"] = _pop_multiple(p, "none", "fc", "facecolor")
p["lw"] = _pop_multiple(p, ctx.line_width, "lw", "linewidth")
rect = Rectangle(r[:2], *r[2:], **p)
ax.add_artist(rect)
if self.label is not None:
offset = np.array(self.label_offset)
if self.position == "bottom left":
pos = r[:2]
ha = "left"
elif self.position == "bottom right":
pos = r[:2]
pos[0] += r[2]
ha = "right"
offset[0] -= 2 * offset[0]
else:
raise RuntimeError("Unknown positioning string: {0}"
.format(self.position))
ax.annotate(self.label, pos, xycoords="data",
xytext=offset, textcoords="offset points",
bbox=self.bbox,
horizontalalignment=ha)
return rect
class _rendering_context(object):
"""
:param shape:
The number of rows and columns in the grid.
:param origin:
The coordinates of the bottom left corner of the plot.
:param grid_unit:
The size of the grid spacing measured in centimeters.
:param node_unit:
The base unit for the node size. This is a number in centimeters that
sets the default diameter of the nodes.
:param observed_style:
How should the "observed" nodes be indicated? This must be one of:
``"shaded"``, ``"inner"`` or ``"outer"`` where ``inner`` and
``outer`` nodes are shown as double circles with the second circle
plotted inside or outside of the standard one, respectively.
:param node_ec:
The default edge color for the nodes.
:param directed:
Should the edges be directed by default?
:param aspect:
The default aspect ratio for the nodes.
:param label_params:
Default node label parameters.
"""
def __init__(self, **kwargs):
# Save the style defaults.
self.line_width = kwargs.get("line_width", 1.0)
# Make sure that the observed node style is one that we recognize.
self.observed_style = kwargs.get("observed_style", "shaded").lower()
styles = ["shaded", "inner", "outer"]
assert self.observed_style in styles, \
"Unrecognized observed node style: {0}\n".format(
self.observed_style) \
+ "\tOptions are: {0}".format(", ".join(styles))
# Set up the figure and grid dimensions.
self.shape = np.array(kwargs.get("shape", [1, 1]))
self.origin = np.array(kwargs.get("origin", [0, 0]))
self.grid_unit = kwargs.get("grid_unit", 2.0)
self.figsize = self.grid_unit * self.shape / 2.54
self.node_unit = kwargs.get("node_unit", 1.0)
self.node_ec = kwargs.get("node_ec", "k")
self.directed = kwargs.get("directed", True)
self.aspect = kwargs.get("aspect", 1.0)
self.label_params = dict(kwargs.get("label_params", {}))
# Initialize the figure to ``None`` to handle caching later.
self._figure = None
self._ax = None
def figure(self):
if self._figure is not None:
return self._figure
self._figure = plt.figure(figsize=self.figsize)
return self._figure
def ax(self):
if self._ax is not None:
return self._ax
# Add a new axis object if it doesn't exist.
self._ax = self.figure().add_axes((0, 0, 1, 1), frameon=False,
xticks=[], yticks=[])
# Set the bounds.
l0 = self.convert(*self.origin)
l1 = self.convert(*(self.origin + self.shape))
self._ax.set_xlim(l0[0], l1[0])
self._ax.set_ylim(l0[1], l1[1])
return self._ax
def convert(self, *xy):
"""
Convert from model coordinates to plot coordinates.
"""
assert len(xy) == 2
return self.grid_unit * (np.atleast_1d(xy) - self.origin)
def _pop_multiple(d, default, *args):
"""
A helper function for dealing with the way that matplotlib annoyingly
allows multiple keyword arguments. For example, ``edgecolor`` and ``ec``
are generally equivalent but no exception is thrown if they are both
used.
    *Note: This function does throw a :class:`TypeError` if more than one
    of the equivalent arguments are provided.*
:param d:
A :class:`dict`-like object to "pop" from.
:param default:
The default value to return if none of the arguments are provided.
:param *args:
The arguments to try to retrieve.
"""
assert len(args) > 0, "You must provide at least one argument to 'pop'."
results = []
for k in args:
try:
results.append((k, d.pop(k)))
except KeyError:
pass
if len(results) > 1:
raise TypeError("The arguments ({0}) are equivalent, you can only "
.format(", ".join([k for k, v in results]))
+ "provide one of them.")
if len(results) == 0:
return default
return results[0][1]
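# --- Editor's usage sketch (not part of the original module). It wires the
# classes defined above into the smallest useful model: one latent node, one
# observed node inside a plate, and a directed edge between them. The node
# names, labels, coordinates, and output filename are arbitrary illustration
# choices.
if __name__ == "__main__":
    pgm = PGM(shape=[3, 2])
    pgm.add_node(Node("theta", r"$\theta$", 1, 1.5))
    pgm.add_node(Node("x", r"$x_n$", 1, 0.5, observed=True))
    pgm.add_edge("theta", "x")
    pgm.add_plate(Plate([0.5, 0.0, 1.0, 1.0], label=r"$n = 1, \ldots, N$"))
    pgm.render()
    pgm.figure.savefig("pgm_demo.png")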
|
mit
|
henrykironde/scikit-learn
|
examples/semi_supervised/plot_label_propagation_structure.py
|
247
|
2432
|
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
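# Editor's addition (not part of the original example): make_circles also
# returns the true ring membership in ``y`` (0 = outer, 1 = inner), so the
# propagated labels can be sanity-checked against it directly.
print("Agreement with true circle labels: %.3f"
      % np.mean(output_label_array == y))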
|
bsd-3-clause
|
wei-Z/Python-Machine-Learning
|
self_practice/Chapter 3 K-nearest neighbors.py
|
1
|
3032
|
# K-nearest neighbors - a lazy learning algorithm
'''
KNN is a typical example of a lazy learner. It is called lazy not because of its
apparent simplicity, but because it doesn't learn a discriminative function from
the training data but memorizes the training dataset instead.
'''
# Train a model to classify the different flowers in our Iris dataset
from sklearn import datasets
import numpy as np
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
from sklearn.cross_validation import train_test_split
# random_state : int or RandomState
# Pseudo-random number generator state used for random sampling.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train) # only compute mean and std here
X_train_std = sc.transform(X_train) # perform standardization by centering and scaling
X_test_std = sc.transform(X_test) # perform standardization by centering and scaling
# Specify the indices of the samples that we want to mark on the resulting plots.
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot all samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
# highlight test samples
if test_idx:
X_test, y_test = X[test_idx, :], y[test_idx]
plt.scatter(X_test[:, 0], X_test[:, 1], c='', alpha=1.0, linewidth=1, marker='o', s=55, label='test set')
'''
By executing the following code, we will now implement a KNN model in
scikit-learn using an Euclidean distance metric:
'''
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
knn.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined, classifier=knn, test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.show()
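# Editor's addition (not part of the book's snippet): a quick check of how the
# fitted KNN model generalizes, scored on the standardized held-out test split
# prepared above.
print('Test accuracy: %.3f' % knn.score(X_test_std, y_test))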
|
mit
|
orangeYao/twiOpinion
|
mainLearning.py
|
1
|
2006
|
#!/usr/bin/env python
#copied from zhiyao@combo: /home/zhiyao/FYPstart/largeTestData
#only library in sklearnClassify included currently
import functions
import sklearnClassify
import pandas as pd
import random
import datetime
numberForTraining = 4000
numberForTesting = 400
numberUsedAll = numberForTraining + numberForTesting
path = './output/'
input_list, input_score = functions.readTestComment(path, numberUsedAll)
pathTweet = './output/unknownHead.csv'
tweets = functions.readManyStrings(pathTweet)
tweets = tweets[0: 2000]
randomSelect = random.sample(xrange(len(input_score)), numberUsedAll)
input_list = [input_list[i] for i in randomSelect]
input_score = [input_score[i] for i in randomSelect]
print "Size of positive training set:"
print input_score.count("1")
print "Size of negative training set:"
print input_score.count("-1")
print "in filtering process..."
filtered, freq_words = functions.useFilter(input_list, True)
f_tweets = functions.useFilter(tweets, False)
print f_tweets[0:5]
raw = functions.formRawDict(filtered, input_score)
df = pd.DataFrame(raw)
wordList = list(df.itertuples(index = False, name = None))
wordList = functions.filterZeroScore(wordList)
accuracy0 = []
accuracy1 = []
accuracy2 = []
for i in range(1,5):
print i
random.shuffle(wordList)
wordList = wordList[0:numberUsedAll]
trainingList = wordList[:numberForTraining] #before index
#trainingList2 = wordList[numberUnlabeled:numberForTraining]
testList = wordList[numberForTraining:]
accuracy0.append(sklearnClassify.bayes(filtered, input_score, numberForTraining, 'BernoulliNB', f_tweets))
print ""
accuracy1.append(sklearnClassify.bayes(filtered, input_score, numberForTraining, 'MultinomialNB', f_tweets))
print ""
accuracy2.append(sklearnClassify.svm(filtered, input_score, numberForTraining, f_tweets))
print "end test"
print ""
print sum(accuracy0)/len(accuracy0)
print sum(accuracy1)/len(accuracy1)
print sum(accuracy2)/len(accuracy2)
|
mit
|
NunoEdgarGub1/scikit-learn
|
benchmarks/bench_plot_parallel_pairwise.py
|
297
|
1247
|
# Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
|
bsd-3-clause
|
justincassidy/scikit-learn
|
examples/linear_model/plot_theilsen.py
|
232
|
3615
|
"""
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. With this restriction, Theil-Sen is
applicable to larger problems, at the cost of losing some of its mathematical
properties since it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
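##############################################################################
# Editor's sketch (not part of the original example): the module docstring
# describes Theil-Sen as a (spatial) median over slopes and intercepts of
# point combinations. In the 1-D case this reduces to the classic median of
# pairwise slopes, written out here with plain numpy for illustration; the
# helper name ``_theil_sen_1d`` is ad hoc, not part of scikit-learn.
def _theil_sen_1d(x_vals, y_vals):
    # All index pairs i < j, keeping only pairs with distinct x values.
    idx_i, idx_j = np.triu_indices(len(x_vals), k=1)
    dx = x_vals[idx_j] - x_vals[idx_i]
    keep = dx != 0
    slopes = (y_vals[idx_j] - y_vals[idx_i])[keep] / dx[keep]
    slope = np.median(slopes)
    # Intercept: median of the per-point intercept estimates.
    intercept = np.median(y_vals - slope * x_vals)
    return slope, intercept
print("1-D Theil-Sen sketch on the second dataset: slope=%.2f, intercept=%.2f"
      % _theil_sen_1d(x, y))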
|
bsd-3-clause
|
hunering/demo-code
|
python/books/DLFS/4.5.py
|
1
|
2239
|
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.utils import to_categorical
from utils import sigmoid, softmax, cross_entropy_error, numerical_gradient
class TwoLayerNet:
def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
self.param = {}
self.param['W1'] = weight_init_std*np.random.randn(input_size, hidden_size)
self.param['W2'] = weight_init_std*np.random.randn(hidden_size, output_size)
self.param['b1'] = np.zeros(hidden_size)
self.param['b2'] = np.zeros(output_size)
def predict(self, x):
W1, W2 = self.param['W1'], self.param['W2']
b1, b2 = self.param['b1'], self.param['b2']
a1 = np.dot(x, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
y = softmax(a2)
return y
def loss(self, x, t):
y = self.predict(x)
return cross_entropy_error(y, t)
def accuracy(self, x, t):
y = self.predict(x)
y = np.argmax(y, axis=1)
t = np.argmax(t, axis=1)
return np.sum(y==t)/float(t.shape[0])
def numerical_gradient(self, x, t):
loss_w = lambda W : self.loss(x, t)
grads = {}
grads['W1'] = numerical_gradient(loss_w, self.param['W1'])
grads['b1'] = numerical_gradient(loss_w, self.param['b1'])
grads['W2'] = numerical_gradient(loss_w, self.param['W2'])
grads['b2'] = numerical_gradient(loss_w, self.param['b2'])
return grads
net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 28*28)
y_train = to_categorical(y_train, num_classes=10)
x_test = x_test.reshape(x_test.shape[0], 28*28)
y_test = to_categorical(y_test, num_classes=10)
train_loss_list = []
inters_num = 1000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
for i in range(inters_num):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
y_batch = y_train[batch_mask]
grad = net.numerical_gradient(x_batch, y_batch)
for key in ['W1', 'b1', 'W2', 'b2']:
net.param[key] -= learning_rate * grad[key]
loss = net.loss(x_batch, y_batch)
train_loss_list.append(loss)
plt.plot(train_loss_list)
plt.show()
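# Editor's addition (not part of the original script): report held-out accuracy
# with the accuracy() method defined above. Note that the purely numerical
# gradients make the training loop above very slow; this line only reuses the
# parameters that were already trained.
print('test accuracy: %.3f' % net.accuracy(x_test, y_test))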
|
gpl-3.0
|
RayMick/scikit-learn
|
sklearn/mixture/tests/test_gmm.py
|
200
|
17427
|
import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and
    fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
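# --- Editor's sketch (not part of the test module): the basic pattern the
# tests above exercise, in its smallest form -- fit a 2-component spherical
# GMM on two shifted blobs and inspect the fitted means. The data here is
# arbitrary demo data.
if __name__ == '__main__':
    _X_demo = rng.randn(200, 2)
    _X_demo[100:] += 5
    _gmm_demo = mixture.GMM(n_components=2, covariance_type='spherical',
                            random_state=rng, min_covar=1e-7, n_iter=20)
    _gmm_demo.fit(_X_demo)
    print(_gmm_demo.means_)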
|
bsd-3-clause
|
wholmgren/pvlib-python
|
pvlib/spa.py
|
1
|
48617
|
"""
Calculate the solar position using the NREL SPA algorithm either using
numpy arrays or compiling the code to machine language with numba.
"""
# Contributors:
# Created by Tony Lorenzo (@alorenzo175), Univ. of Arizona, 2015
from __future__ import division
import os
import threading
import warnings
import numpy as np
# this block is a way to use an environment variable to switch between
# compiling the functions with numba or just using numpy
def nocompile(*args, **kwargs):
return lambda func: func
if os.getenv('PVLIB_USE_NUMBA', '0') != '0':
try:
from numba import jit, __version__
except ImportError:
warnings.warn('Could not import numba, falling back to numpy ' +
'calculation')
jcompile = nocompile
USE_NUMBA = False
else:
major, minor = __version__.split('.')[:2]
        if (int(major), int(minor)) >= (0, 17):
# need at least numba >= 0.17.0
jcompile = jit
USE_NUMBA = True
else:
warnings.warn('Numba version must be >= 0.17.0, falling back to ' +
'numpy')
jcompile = nocompile
USE_NUMBA = False
else:
jcompile = nocompile
USE_NUMBA = False
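# Editor's note (not part of the original module): the switch above is driven
# entirely by the environment, so a hypothetical session could look like
#
#     PVLIB_USE_NUMBA=1 python my_spa_script.py   # try the numba path
#     python my_spa_script.py                     # default: plain numpy
#
# where ``my_spa_script.py`` is a placeholder; the chosen backend can be
# confirmed afterwards by inspecting the USE_NUMBA flag.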
TABLE_1_DICT = {
'L0': np.array(
[[175347046.0, 0.0, 0.0],
[3341656.0, 4.6692568, 6283.07585],
[34894.0, 4.6261, 12566.1517],
[3497.0, 2.7441, 5753.3849],
[3418.0, 2.8289, 3.5231],
[3136.0, 3.6277, 77713.7715],
[2676.0, 4.4181, 7860.4194],
[2343.0, 6.1352, 3930.2097],
[1324.0, 0.7425, 11506.7698],
[1273.0, 2.0371, 529.691],
[1199.0, 1.1096, 1577.3435],
[990.0, 5.233, 5884.927],
[902.0, 2.045, 26.298],
[857.0, 3.508, 398.149],
[780.0, 1.179, 5223.694],
[753.0, 2.533, 5507.553],
[505.0, 4.583, 18849.228],
[492.0, 4.205, 775.523],
[357.0, 2.92, 0.067],
[317.0, 5.849, 11790.629],
[284.0, 1.899, 796.298],
[271.0, 0.315, 10977.079],
[243.0, 0.345, 5486.778],
[206.0, 4.806, 2544.314],
[205.0, 1.869, 5573.143],
[202.0, 2.458, 6069.777],
[156.0, 0.833, 213.299],
[132.0, 3.411, 2942.463],
[126.0, 1.083, 20.775],
[115.0, 0.645, 0.98],
[103.0, 0.636, 4694.003],
[102.0, 0.976, 15720.839],
[102.0, 4.267, 7.114],
[99.0, 6.21, 2146.17],
[98.0, 0.68, 155.42],
[86.0, 5.98, 161000.69],
[85.0, 1.3, 6275.96],
[85.0, 3.67, 71430.7],
[80.0, 1.81, 17260.15],
[79.0, 3.04, 12036.46],
[75.0, 1.76, 5088.63],
[74.0, 3.5, 3154.69],
[74.0, 4.68, 801.82],
[70.0, 0.83, 9437.76],
[62.0, 3.98, 8827.39],
[61.0, 1.82, 7084.9],
[57.0, 2.78, 6286.6],
[56.0, 4.39, 14143.5],
[56.0, 3.47, 6279.55],
[52.0, 0.19, 12139.55],
[52.0, 1.33, 1748.02],
[51.0, 0.28, 5856.48],
[49.0, 0.49, 1194.45],
[41.0, 5.37, 8429.24],
[41.0, 2.4, 19651.05],
[39.0, 6.17, 10447.39],
[37.0, 6.04, 10213.29],
[37.0, 2.57, 1059.38],
[36.0, 1.71, 2352.87],
[36.0, 1.78, 6812.77],
[33.0, 0.59, 17789.85],
[30.0, 0.44, 83996.85],
[30.0, 2.74, 1349.87],
[25.0, 3.16, 4690.48]]),
'L1': np.array(
[[628331966747.0, 0.0, 0.0],
[206059.0, 2.678235, 6283.07585],
[4303.0, 2.6351, 12566.1517],
[425.0, 1.59, 3.523],
[119.0, 5.796, 26.298],
[109.0, 2.966, 1577.344],
[93.0, 2.59, 18849.23],
[72.0, 1.14, 529.69],
[68.0, 1.87, 398.15],
[67.0, 4.41, 5507.55],
[59.0, 2.89, 5223.69],
[56.0, 2.17, 155.42],
[45.0, 0.4, 796.3],
[36.0, 0.47, 775.52],
[29.0, 2.65, 7.11],
[21.0, 5.34, 0.98],
[19.0, 1.85, 5486.78],
[19.0, 4.97, 213.3],
[17.0, 2.99, 6275.96],
[16.0, 0.03, 2544.31],
[16.0, 1.43, 2146.17],
[15.0, 1.21, 10977.08],
[12.0, 2.83, 1748.02],
[12.0, 3.26, 5088.63],
[12.0, 5.27, 1194.45],
[12.0, 2.08, 4694.0],
[11.0, 0.77, 553.57],
[10.0, 1.3, 6286.6],
[10.0, 4.24, 1349.87],
[9.0, 2.7, 242.73],
[9.0, 5.64, 951.72],
[8.0, 5.3, 2352.87],
[6.0, 2.65, 9437.76],
[6.0, 4.67, 4690.48]]),
'L2': np.array(
[[52919.0, 0.0, 0.0],
[8720.0, 1.0721, 6283.0758],
[309.0, 0.867, 12566.152],
[27.0, 0.05, 3.52],
[16.0, 5.19, 26.3],
[16.0, 3.68, 155.42],
[10.0, 0.76, 18849.23],
[9.0, 2.06, 77713.77],
[7.0, 0.83, 775.52],
[5.0, 4.66, 1577.34],
[4.0, 1.03, 7.11],
[4.0, 3.44, 5573.14],
[3.0, 5.14, 796.3],
[3.0, 6.05, 5507.55],
[3.0, 1.19, 242.73],
[3.0, 6.12, 529.69],
[3.0, 0.31, 398.15],
[3.0, 2.28, 553.57],
[2.0, 4.38, 5223.69],
[2.0, 3.75, 0.98]]),
'L3': np.array(
[[289.0, 5.844, 6283.076],
[35.0, 0.0, 0.0],
[17.0, 5.49, 12566.15],
[3.0, 5.2, 155.42],
[1.0, 4.72, 3.52],
[1.0, 5.3, 18849.23],
[1.0, 5.97, 242.73]]),
'L4': np.array(
[[114.0, 3.142, 0.0],
[8.0, 4.13, 6283.08],
[1.0, 3.84, 12566.15]]),
'L5': np.array(
[[1.0, 3.14, 0.0]]),
'B0': np.array(
[[280.0, 3.199, 84334.662],
[102.0, 5.422, 5507.553],
[80.0, 3.88, 5223.69],
[44.0, 3.7, 2352.87],
[32.0, 4.0, 1577.34]]),
'B1': np.array(
[[9.0, 3.9, 5507.55],
[6.0, 1.73, 5223.69]]),
'R0': np.array(
[[100013989.0, 0.0, 0.0],
[1670700.0, 3.0984635, 6283.07585],
[13956.0, 3.05525, 12566.1517],
[3084.0, 5.1985, 77713.7715],
[1628.0, 1.1739, 5753.3849],
[1576.0, 2.8469, 7860.4194],
[925.0, 5.453, 11506.77],
[542.0, 4.564, 3930.21],
[472.0, 3.661, 5884.927],
[346.0, 0.964, 5507.553],
[329.0, 5.9, 5223.694],
[307.0, 0.299, 5573.143],
[243.0, 4.273, 11790.629],
[212.0, 5.847, 1577.344],
[186.0, 5.022, 10977.079],
[175.0, 3.012, 18849.228],
[110.0, 5.055, 5486.778],
[98.0, 0.89, 6069.78],
[86.0, 5.69, 15720.84],
[86.0, 1.27, 161000.69],
[65.0, 0.27, 17260.15],
[63.0, 0.92, 529.69],
[57.0, 2.01, 83996.85],
[56.0, 5.24, 71430.7],
[49.0, 3.25, 2544.31],
[47.0, 2.58, 775.52],
[45.0, 5.54, 9437.76],
[43.0, 6.01, 6275.96],
[39.0, 5.36, 4694.0],
[38.0, 2.39, 8827.39],
[37.0, 0.83, 19651.05],
[37.0, 4.9, 12139.55],
[36.0, 1.67, 12036.46],
[35.0, 1.84, 2942.46],
[33.0, 0.24, 7084.9],
[32.0, 0.18, 5088.63],
[32.0, 1.78, 398.15],
[28.0, 1.21, 6286.6],
[28.0, 1.9, 6279.55],
[26.0, 4.59, 10447.39]]),
'R1': np.array(
[[103019.0, 1.10749, 6283.07585],
[1721.0, 1.0644, 12566.1517],
[702.0, 3.142, 0.0],
[32.0, 1.02, 18849.23],
[31.0, 2.84, 5507.55],
[25.0, 1.32, 5223.69],
[18.0, 1.42, 1577.34],
[10.0, 5.91, 10977.08],
[9.0, 1.42, 6275.96],
[9.0, 0.27, 5486.78]]),
'R2': np.array(
[[4359.0, 5.7846, 6283.0758],
[124.0, 5.579, 12566.152],
[12.0, 3.14, 0.0],
[9.0, 3.63, 77713.77],
[6.0, 1.87, 5573.14],
[3.0, 5.47, 18849.23]]),
'R3': np.array(
[[145.0, 4.273, 6283.076],
[7.0, 3.92, 12566.15]]),
'R4': np.array(
[[4.0, 2.56, 6283.08]])
}
resize_mapping = {
'L1': (64, 3), 'L2': (64, 3), 'L3': (64, 3), 'L4': (64, 3), 'L5': (64, 3),
'B1': (5, 3), 'R1': (40, 3), 'R2': (40, 3), 'R3': (40, 3), 'R4': (40, 3)}
# make arrays uniform size for efficient broadcasting in numba, fill with 0s
# np.resize does not work because it fills with repeated copies
for key, dims in resize_mapping.items():
new_rows = dims[0] - TABLE_1_DICT[key].shape[0]
TABLE_1_DICT[key] = np.append(TABLE_1_DICT[key], np.zeros((new_rows, 3)),
axis=0)
HELIO_LONG_TABLE = np.array([TABLE_1_DICT['L0'],
TABLE_1_DICT['L1'],
TABLE_1_DICT['L2'],
TABLE_1_DICT['L3'],
TABLE_1_DICT['L4'],
TABLE_1_DICT['L5']])
HELIO_LAT_TABLE = np.array([TABLE_1_DICT['B0'],
TABLE_1_DICT['B1']])
HELIO_RADIUS_TABLE = np.array([TABLE_1_DICT['R0'],
TABLE_1_DICT['R1'],
TABLE_1_DICT['R2'],
TABLE_1_DICT['R3'],
TABLE_1_DICT['R4']])
NUTATION_ABCD_ARRAY = np.array([
[-171996, -174.2, 92025, 8.9],
[-13187, -1.6, 5736, -3.1],
[-2274, -0.2, 977, -0.5],
[2062, 0.2, -895, 0.5],
[1426, -3.4, 54, -0.1],
[712, 0.1, -7, 0],
[-517, 1.2, 224, -0.6],
[-386, -0.4, 200, 0],
[-301, 0, 129, -0.1],
[217, -0.5, -95, 0.3],
[-158, 0, 0, 0],
[129, 0.1, -70, 0],
[123, 0, -53, 0],
[63, 0, 0, 0],
[63, 0.1, -33, 0],
[-59, 0, 26, 0],
[-58, -0.1, 32, 0],
[-51, 0, 27, 0],
[48, 0, 0, 0],
[46, 0, -24, 0],
[-38, 0, 16, 0],
[-31, 0, 13, 0],
[29, 0, 0, 0],
[29, 0, -12, 0],
[26, 0, 0, 0],
[-22, 0, 0, 0],
[21, 0, -10, 0],
[17, -0.1, 0, 0],
[16, 0, -8, 0],
[-16, 0.1, 7, 0],
[-15, 0, 9, 0],
[-13, 0, 7, 0],
[-12, 0, 6, 0],
[11, 0, 0, 0],
[-10, 0, 5, 0],
[-8, 0, 3, 0],
[7, 0, -3, 0],
[-7, 0, 0, 0],
[-7, 0, 3, 0],
[-7, 0, 3, 0],
[6, 0, 0, 0],
[6, 0, -3, 0],
[6, 0, -3, 0],
[-6, 0, 3, 0],
[-6, 0, 3, 0],
[5, 0, 0, 0],
[-5, 0, 3, 0],
[-5, 0, 3, 0],
[-5, 0, 3, 0],
[4, 0, 0, 0],
[4, 0, 0, 0],
[4, 0, 0, 0],
[-4, 0, 0, 0],
[-4, 0, 0, 0],
[-4, 0, 0, 0],
[3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
])
NUTATION_YTERM_ARRAY = np.array([
[0, 0, 0, 0, 1],
[-2, 0, 0, 2, 2],
[0, 0, 0, 2, 2],
[0, 0, 0, 0, 2],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[-2, 1, 0, 2, 2],
[0, 0, 0, 2, 1],
[0, 0, 1, 2, 2],
[-2, -1, 0, 2, 2],
[-2, 0, 1, 0, 0],
[-2, 0, 0, 2, 1],
[0, 0, -1, 2, 2],
[2, 0, 0, 0, 0],
[0, 0, 1, 0, 1],
[2, 0, -1, 2, 2],
[0, 0, -1, 0, 1],
[0, 0, 1, 2, 1],
[-2, 0, 2, 0, 0],
[0, 0, -2, 2, 1],
[2, 0, 0, 2, 2],
[0, 0, 2, 2, 2],
[0, 0, 2, 0, 0],
[-2, 0, 1, 2, 2],
[0, 0, 0, 2, 0],
[-2, 0, 0, 2, 0],
[0, 0, -1, 2, 1],
[0, 2, 0, 0, 0],
[2, 0, -1, 0, 1],
[-2, 2, 0, 2, 2],
[0, 1, 0, 0, 1],
[-2, 0, 1, 0, 1],
[0, -1, 0, 0, 1],
[0, 0, 2, -2, 0],
[2, 0, -1, 2, 1],
[2, 0, 1, 2, 2],
[0, 1, 0, 2, 2],
[-2, 1, 1, 0, 0],
[0, -1, 0, 2, 2],
[2, 0, 0, 2, 1],
[2, 0, 1, 0, 0],
[-2, 0, 2, 2, 2],
[-2, 0, 1, 2, 1],
[2, 0, -2, 0, 1],
[2, 0, 0, 0, 1],
[0, -1, 1, 0, 0],
[-2, -1, 0, 2, 1],
[-2, 0, 0, 0, 1],
[0, 0, 2, 2, 1],
[-2, 0, 2, 0, 1],
[-2, 1, 0, 2, 1],
[0, 0, 1, -2, 0],
[-1, 0, 1, 0, 0],
[-2, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 1, 2, 0],
[0, 0, -2, 2, 2],
[-1, -1, 1, 0, 0],
[0, 1, 1, 0, 0],
[0, -1, 1, 2, 2],
[2, -1, -1, 2, 2],
[0, 0, 3, 2, 2],
[2, -1, 0, 2, 2],
])
@jcompile('float64(int64, int64, int64, int64, int64, int64, int64)',
nopython=True)
def julian_day_dt(year, month, day, hour, minute, second, microsecond):
"""This is the original way to calculate the julian day from the NREL paper.
However, it is much faster to convert to unix/epoch time and then convert
to julian day. Note that the date must be UTC."""
if month <= 2:
year = year-1
month = month+12
a = int(year/100)
b = 2 - a + int(a * 0.25)
frac_of_day = (microsecond + (second + minute * 60 + hour * 3600)
) * 1.0 / (3600*24)
d = day + frac_of_day
jd = (int(365.25 * (year + 4716)) + int(30.6001 * (month + 1)) + d +
b - 1524.5)
return jd
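# Example (illustrative, values rounded): the reference date used in the NREL
# SPA paper, 2003-10-17 19:30:30 UTC, gives
#   julian_day_dt(2003, 10, 17, 19, 30, 30, 0)  # ~= 2452930.312847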
@jcompile('float64(float64)', nopython=True)
def julian_day(unixtime):
jd = unixtime * 1.0 / 86400 + 2440587.5
return jd
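# Example (illustrative): the unix epoch itself maps to the corresponding
# Julian Day,
#   julian_day(0.0)  # -> 2440587.5 (1970-01-01 00:00 UTC)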
@jcompile('float64(float64, float64)', nopython=True)
def julian_ephemeris_day(julian_day, delta_t):
jde = julian_day + delta_t * 1.0 / 86400
return jde
@jcompile('float64(float64)', nopython=True)
def julian_century(julian_day):
jc = (julian_day - 2451545) * 1.0 / 36525
return jc
@jcompile('float64(float64)', nopython=True)
def julian_ephemeris_century(julian_ephemeris_day):
jce = (julian_ephemeris_day - 2451545) * 1.0 / 36525
return jce
@jcompile('float64(float64)', nopython=True)
def julian_ephemeris_millennium(julian_ephemeris_century):
jme = julian_ephemeris_century * 1.0 / 10
return jme
@jcompile('float64(float64)', nopython=True)
def heliocentric_longitude(jme):
l0 = 0.0
l1 = 0.0
l2 = 0.0
l3 = 0.0
l4 = 0.0
l5 = 0.0
for row in range(HELIO_LONG_TABLE.shape[1]):
l0 += (HELIO_LONG_TABLE[0, row, 0]
* np.cos(HELIO_LONG_TABLE[0, row, 1]
+ HELIO_LONG_TABLE[0, row, 2] * jme)
)
l1 += (HELIO_LONG_TABLE[1, row, 0]
* np.cos(HELIO_LONG_TABLE[1, row, 1]
+ HELIO_LONG_TABLE[1, row, 2] * jme)
)
l2 += (HELIO_LONG_TABLE[2, row, 0]
* np.cos(HELIO_LONG_TABLE[2, row, 1]
+ HELIO_LONG_TABLE[2, row, 2] * jme)
)
l3 += (HELIO_LONG_TABLE[3, row, 0]
* np.cos(HELIO_LONG_TABLE[3, row, 1]
+ HELIO_LONG_TABLE[3, row, 2] * jme)
)
l4 += (HELIO_LONG_TABLE[4, row, 0]
* np.cos(HELIO_LONG_TABLE[4, row, 1]
+ HELIO_LONG_TABLE[4, row, 2] * jme)
)
l5 += (HELIO_LONG_TABLE[5, row, 0]
* np.cos(HELIO_LONG_TABLE[5, row, 1]
+ HELIO_LONG_TABLE[5, row, 2] * jme)
)
l_rad = (l0 + l1 * jme + l2 * jme**2 + l3 * jme**3 + l4 * jme**4 +
l5 * jme**5)/10**8
l = np.rad2deg(l_rad)
return l % 360
@jcompile('float64(float64)', nopython=True)
def heliocentric_latitude(jme):
b0 = 0.0
b1 = 0.0
for row in range(HELIO_LAT_TABLE.shape[1]):
b0 += (HELIO_LAT_TABLE[0, row, 0]
* np.cos(HELIO_LAT_TABLE[0, row, 1]
+ HELIO_LAT_TABLE[0, row, 2] * jme)
)
b1 += (HELIO_LAT_TABLE[1, row, 0]
* np.cos(HELIO_LAT_TABLE[1, row, 1]
+ HELIO_LAT_TABLE[1, row, 2] * jme)
)
b_rad = (b0 + b1 * jme)/10**8
b = np.rad2deg(b_rad)
return b
@jcompile('float64(float64)', nopython=True)
def heliocentric_radius_vector(jme):
r0 = 0.0
r1 = 0.0
r2 = 0.0
r3 = 0.0
r4 = 0.0
for row in range(HELIO_RADIUS_TABLE.shape[1]):
r0 += (HELIO_RADIUS_TABLE[0, row, 0]
* np.cos(HELIO_RADIUS_TABLE[0, row, 1]
+ HELIO_RADIUS_TABLE[0, row, 2] * jme)
)
r1 += (HELIO_RADIUS_TABLE[1, row, 0]
* np.cos(HELIO_RADIUS_TABLE[1, row, 1]
+ HELIO_RADIUS_TABLE[1, row, 2] * jme)
)
r2 += (HELIO_RADIUS_TABLE[2, row, 0]
* np.cos(HELIO_RADIUS_TABLE[2, row, 1]
+ HELIO_RADIUS_TABLE[2, row, 2] * jme)
)
r3 += (HELIO_RADIUS_TABLE[3, row, 0]
* np.cos(HELIO_RADIUS_TABLE[3, row, 1]
+ HELIO_RADIUS_TABLE[3, row, 2] * jme)
)
r4 += (HELIO_RADIUS_TABLE[4, row, 0]
* np.cos(HELIO_RADIUS_TABLE[4, row, 1]
+ HELIO_RADIUS_TABLE[4, row, 2] * jme)
)
r = (r0 + r1 * jme + r2 * jme**2 + r3 * jme**3 + r4 * jme**4)/10**8
return r
@jcompile('float64(float64)', nopython=True)
def geocentric_longitude(heliocentric_longitude):
theta = heliocentric_longitude + 180.0
return theta % 360
@jcompile('float64(float64)', nopython=True)
def geocentric_latitude(heliocentric_latitude):
beta = -1.0*heliocentric_latitude
return beta
@jcompile('float64(float64)', nopython=True)
def mean_elongation(julian_ephemeris_century):
x0 = (297.85036
+ 445267.111480 * julian_ephemeris_century
- 0.0019142 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 189474)
return x0
@jcompile('float64(float64)', nopython=True)
def mean_anomaly_sun(julian_ephemeris_century):
x1 = (357.52772
+ 35999.050340 * julian_ephemeris_century
- 0.0001603 * julian_ephemeris_century**2
- julian_ephemeris_century**3 / 300000)
return x1
@jcompile('float64(float64)', nopython=True)
def mean_anomaly_moon(julian_ephemeris_century):
x2 = (134.96298
+ 477198.867398 * julian_ephemeris_century
+ 0.0086972 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 56250)
return x2
@jcompile('float64(float64)', nopython=True)
def moon_argument_latitude(julian_ephemeris_century):
x3 = (93.27191
+ 483202.017538 * julian_ephemeris_century
- 0.0036825 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 327270)
return x3
@jcompile('float64(float64)', nopython=True)
def moon_ascending_longitude(julian_ephemeris_century):
x4 = (125.04452
- 1934.136261 * julian_ephemeris_century
+ 0.0020708 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 450000)
return x4
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
nopython=True)
def longitude_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
delta_psi_sum = 0
for row in range(NUTATION_YTERM_ARRAY.shape[0]):
a = NUTATION_ABCD_ARRAY[row, 0]
b = NUTATION_ABCD_ARRAY[row, 1]
argsin = (NUTATION_YTERM_ARRAY[row, 0]*x0 +
NUTATION_YTERM_ARRAY[row, 1]*x1 +
NUTATION_YTERM_ARRAY[row, 2]*x2 +
NUTATION_YTERM_ARRAY[row, 3]*x3 +
NUTATION_YTERM_ARRAY[row, 4]*x4)
term = (a + b * julian_ephemeris_century) * np.sin(np.radians(argsin))
delta_psi_sum += term
delta_psi = delta_psi_sum*1.0/36000000
return delta_psi
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
nopython=True)
def obliquity_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
delta_eps_sum = 0.0
for row in range(NUTATION_YTERM_ARRAY.shape[0]):
c = NUTATION_ABCD_ARRAY[row, 2]
d = NUTATION_ABCD_ARRAY[row, 3]
argcos = (NUTATION_YTERM_ARRAY[row, 0]*x0 +
NUTATION_YTERM_ARRAY[row, 1]*x1 +
NUTATION_YTERM_ARRAY[row, 2]*x2 +
NUTATION_YTERM_ARRAY[row, 3]*x3 +
NUTATION_YTERM_ARRAY[row, 4]*x4)
term = (c + d * julian_ephemeris_century) * np.cos(np.radians(argcos))
delta_eps_sum += term
delta_eps = delta_eps_sum*1.0/36000000
return delta_eps
@jcompile('float64(float64)', nopython=True)
def mean_ecliptic_obliquity(julian_ephemeris_millennium):
U = 1.0*julian_ephemeris_millennium/10
e0 = (84381.448 - 4680.93 * U - 1.55 * U**2
+ 1999.25 * U**3 - 51.38 * U**4 - 249.67 * U**5
- 39.05 * U**6 + 7.12 * U**7 + 27.87 * U**8
+ 5.79 * U**9 + 2.45 * U**10)
return e0
@jcompile('float64(float64, float64)', nopython=True)
def true_ecliptic_obliquity(mean_ecliptic_obliquity, obliquity_nutation):
e0 = mean_ecliptic_obliquity
deleps = obliquity_nutation
e = e0*1.0/3600 + deleps
return e
@jcompile('float64(float64)', nopython=True)
def aberration_correction(earth_radius_vector):
deltau = -20.4898 / (3600 * earth_radius_vector)
return deltau
@jcompile('float64(float64, float64, float64)', nopython=True)
def apparent_sun_longitude(geocentric_longitude, longitude_nutation,
aberration_correction):
lamd = geocentric_longitude + longitude_nutation + aberration_correction
return lamd
@jcompile('float64(float64, float64)', nopython=True)
def mean_sidereal_time(julian_day, julian_century):
v0 = (280.46061837 + 360.98564736629 * (julian_day - 2451545)
+ 0.000387933 * julian_century**2 - julian_century**3 / 38710000)
return v0 % 360.0
@jcompile('float64(float64, float64, float64)', nopython=True)
def apparent_sidereal_time(mean_sidereal_time, longitude_nutation,
true_ecliptic_obliquity):
v = mean_sidereal_time + longitude_nutation * np.cos(
np.radians(true_ecliptic_obliquity))
return v
@jcompile('float64(float64, float64, float64)', nopython=True)
def geocentric_sun_right_ascension(apparent_sun_longitude,
true_ecliptic_obliquity,
geocentric_latitude):
num = (np.sin(np.radians(apparent_sun_longitude))
* np.cos(np.radians(true_ecliptic_obliquity))
- np.tan(np.radians(geocentric_latitude))
* np.sin(np.radians(true_ecliptic_obliquity)))
alpha = np.degrees(np.arctan2(num, np.cos(
np.radians(apparent_sun_longitude))))
return alpha % 360
@jcompile('float64(float64, float64, float64)', nopython=True)
def geocentric_sun_declination(apparent_sun_longitude, true_ecliptic_obliquity,
geocentric_latitude):
delta = np.degrees(np.arcsin(np.sin(np.radians(geocentric_latitude)) *
np.cos(np.radians(true_ecliptic_obliquity)) +
np.cos(np.radians(geocentric_latitude)) *
np.sin(np.radians(true_ecliptic_obliquity)) *
np.sin(np.radians(apparent_sun_longitude))))
return delta
@jcompile('float64(float64, float64, float64)', nopython=True)
def local_hour_angle(apparent_sidereal_time, observer_longitude,
sun_right_ascension):
"""Measured westward from south"""
H = apparent_sidereal_time + observer_longitude - sun_right_ascension
return H % 360
@jcompile('float64(float64)', nopython=True)
def equatorial_horizontal_parallax(earth_radius_vector):
xi = 8.794 / (3600 * earth_radius_vector)
return xi
@jcompile('float64(float64)', nopython=True)
def uterm(observer_latitude):
u = np.arctan(0.99664719 * np.tan(np.radians(observer_latitude)))
return u
@jcompile('float64(float64, float64, float64)', nopython=True)
def xterm(u, observer_latitude, observer_elevation):
x = (np.cos(u) + observer_elevation / 6378140
* np.cos(np.radians(observer_latitude)))
return x
@jcompile('float64(float64, float64, float64)', nopython=True)
def yterm(u, observer_latitude, observer_elevation):
y = (0.99664719 * np.sin(u) + observer_elevation / 6378140
* np.sin(np.radians(observer_latitude)))
return y
@jcompile('float64(float64, float64,float64, float64)', nopython=True)
def parallax_sun_right_ascension(xterm, equatorial_horizontal_parallax,
local_hour_angle, geocentric_sun_declination):
num = (-xterm * np.sin(np.radians(equatorial_horizontal_parallax))
* np.sin(np.radians(local_hour_angle)))
denom = (np.cos(np.radians(geocentric_sun_declination))
- xterm * np.sin(np.radians(equatorial_horizontal_parallax))
* np.cos(np.radians(local_hour_angle)))
delta_alpha = np.degrees(np.arctan2(num, denom))
return delta_alpha
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_sun_right_ascension(geocentric_sun_right_ascension,
parallax_sun_right_ascension):
alpha_prime = geocentric_sun_right_ascension + parallax_sun_right_ascension
return alpha_prime
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
nopython=True)
def topocentric_sun_declination(geocentric_sun_declination, xterm, yterm,
equatorial_horizontal_parallax,
parallax_sun_right_ascension,
local_hour_angle):
num = ((np.sin(np.radians(geocentric_sun_declination)) - yterm
* np.sin(np.radians(equatorial_horizontal_parallax)))
* np.cos(np.radians(parallax_sun_right_ascension)))
denom = (np.cos(np.radians(geocentric_sun_declination)) - xterm
* np.sin(np.radians(equatorial_horizontal_parallax))
* np.cos(np.radians(local_hour_angle)))
delta = np.degrees(np.arctan2(num, denom))
return delta
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_local_hour_angle(local_hour_angle,
parallax_sun_right_ascension):
H_prime = local_hour_angle - parallax_sun_right_ascension
return H_prime
@jcompile('float64(float64, float64, float64)', nopython=True)
def topocentric_elevation_angle_without_atmosphere(observer_latitude,
topocentric_sun_declination,
topocentric_local_hour_angle
):
e0 = np.degrees(np.arcsin(
np.sin(np.radians(observer_latitude))
* np.sin(np.radians(topocentric_sun_declination))
+ np.cos(np.radians(observer_latitude))
* np.cos(np.radians(topocentric_sun_declination))
* np.cos(np.radians(topocentric_local_hour_angle))))
return e0
@jcompile('float64(float64, float64, float64, float64)', nopython=True)
def atmospheric_refraction_correction(local_pressure, local_temp,
topocentric_elevation_angle_wo_atmosphere,
atmos_refract):
    # switch zeroes delta_e when the sun is below the horizon
switch = topocentric_elevation_angle_wo_atmosphere >= -1.0 * (
0.26667 + atmos_refract)
delta_e = ((local_pressure / 1010.0) * (283.0 / (273 + local_temp))
* 1.02 / (60 * np.tan(np.radians(
topocentric_elevation_angle_wo_atmosphere
+ 10.3 / (topocentric_elevation_angle_wo_atmosphere
+ 5.11))))) * switch
return delta_e
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_elevation_angle(topocentric_elevation_angle_without_atmosphere,
atmospheric_refraction_correction):
e = (topocentric_elevation_angle_without_atmosphere
+ atmospheric_refraction_correction)
return e
@jcompile('float64(float64)', nopython=True)
def topocentric_zenith_angle(topocentric_elevation_angle):
theta = 90 - topocentric_elevation_angle
return theta
@jcompile('float64(float64, float64, float64)', nopython=True)
def topocentric_astronomers_azimuth(topocentric_local_hour_angle,
topocentric_sun_declination,
observer_latitude):
num = np.sin(np.radians(topocentric_local_hour_angle))
denom = (np.cos(np.radians(topocentric_local_hour_angle))
* np.sin(np.radians(observer_latitude))
- np.tan(np.radians(topocentric_sun_declination))
* np.cos(np.radians(observer_latitude)))
gamma = np.degrees(np.arctan2(num, denom))
return gamma % 360
@jcompile('float64(float64)', nopython=True)
def topocentric_azimuth_angle(topocentric_astronomers_azimuth):
phi = topocentric_astronomers_azimuth + 180
return phi % 360
@jcompile('float64(float64)', nopython=True)
def sun_mean_longitude(julian_ephemeris_millennium):
M = (280.4664567 + 360007.6982779 * julian_ephemeris_millennium
+ 0.03032028 * julian_ephemeris_millennium**2
+ julian_ephemeris_millennium**3 / 49931
- julian_ephemeris_millennium**4 / 15300
- julian_ephemeris_millennium**5 / 2000000)
return M
@jcompile('float64(float64, float64, float64, float64)', nopython=True)
def equation_of_time(sun_mean_longitude, geocentric_sun_right_ascension,
longitude_nutation, true_ecliptic_obliquity):
E = (sun_mean_longitude - 0.0057183 - geocentric_sun_right_ascension +
longitude_nutation * np.cos(np.radians(true_ecliptic_obliquity)))
# limit between 0 and 360
E = E % 360
# convert to minutes
E *= 4
greater = E > 20
less = E < -20
other = (E <= 20) & (E >= -20)
E = greater * (E - 1440) + less * (E + 1440) + other * E
return E
@jcompile('void(float64[:], float64[:], float64[:,:])', nopython=True,
nogil=True)
def solar_position_loop(unixtime, loc_args, out):
"""Loop through the time array and calculate the solar position"""
lat = loc_args[0]
lon = loc_args[1]
elev = loc_args[2]
pressure = loc_args[3]
temp = loc_args[4]
delta_t = loc_args[5]
atmos_refract = loc_args[6]
sst = loc_args[7]
esd = loc_args[8]
for i in range(unixtime.shape[0]):
utime = unixtime[i]
jd = julian_day(utime)
jde = julian_ephemeris_day(jd, delta_t)
jc = julian_century(jd)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
R = heliocentric_radius_vector(jme)
if esd:
out[0, i] = R
continue
L = heliocentric_longitude(jme)
B = heliocentric_latitude(jme)
Theta = geocentric_longitude(L)
beta = geocentric_latitude(B)
x0 = mean_elongation(jce)
x1 = mean_anomaly_sun(jce)
x2 = mean_anomaly_moon(jce)
x3 = moon_argument_latitude(jce)
x4 = moon_ascending_longitude(jce)
delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4)
delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = mean_ecliptic_obliquity(jme)
epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = aberration_correction(R)
lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = mean_sidereal_time(jd, jc)
v = apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = geocentric_sun_declination(lamd, epsilon, beta)
if sst:
out[0, i] = v
out[1, i] = alpha
out[2, i] = delta
continue
m = sun_mean_longitude(jme)
eot = equation_of_time(m, alpha, delta_psi, epsilon)
H = local_hour_angle(v, lon, alpha)
xi = equatorial_horizontal_parallax(R)
u = uterm(lat)
x = xterm(u, lat, elev)
y = yterm(u, lat, elev)
delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha,
H)
H_prime = topocentric_local_hour_angle(H, delta_alpha)
e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
H_prime)
delta_e = atmospheric_refraction_correction(pressure, temp, e0,
atmos_refract)
e = topocentric_elevation_angle(e0, delta_e)
theta = topocentric_zenith_angle(e)
theta0 = topocentric_zenith_angle(e0)
gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
phi = topocentric_azimuth_angle(gamma)
out[0, i] = theta
out[1, i] = theta0
out[2, i] = e
out[3, i] = e0
out[4, i] = phi
out[5, i] = eot
def solar_position_numba(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads, sst=False, esd=False):
"""Calculate the solar position using the numba compiled functions
and multiple threads. Very slow if functions are not numba compiled.
"""
# these args are the same for each thread
loc_args = np.array([lat, lon, elev, pressure, temp, delta_t,
atmos_refract, sst, esd])
# construct dims x ulength array to put the results in
ulength = unixtime.shape[0]
if sst:
dims = 3
elif esd:
dims = 1
else:
dims = 6
result = np.empty((dims, ulength), dtype=np.float64)
if unixtime.dtype != np.float64:
unixtime = unixtime.astype(np.float64)
if ulength < numthreads:
        warnings.warn('The number of threads is more than the length of '
                      'the time array. Only using {} threads.'.format(ulength))
numthreads = ulength
if numthreads <= 1:
solar_position_loop(unixtime, loc_args, result)
return result
# split the input and output arrays into numthreads chunks
split0 = np.array_split(unixtime, numthreads)
split2 = np.array_split(result, numthreads, axis=1)
chunks = [[a0, loc_args, split2[i]] for i, a0 in enumerate(split0)]
# Spawn one thread per chunk
threads = [threading.Thread(target=solar_position_loop, args=chunk)
for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
def solar_position_numpy(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads, sst=False, esd=False):
"""Calculate the solar position assuming unixtime is a numpy array. Note
this function will not work if the solar position functions were
compiled with numba.
"""
jd = julian_day(unixtime)
jde = julian_ephemeris_day(jd, delta_t)
jc = julian_century(jd)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
R = heliocentric_radius_vector(jme)
if esd:
return (R, )
L = heliocentric_longitude(jme)
B = heliocentric_latitude(jme)
Theta = geocentric_longitude(L)
beta = geocentric_latitude(B)
x0 = mean_elongation(jce)
x1 = mean_anomaly_sun(jce)
x2 = mean_anomaly_moon(jce)
x3 = moon_argument_latitude(jce)
x4 = moon_ascending_longitude(jce)
delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4)
delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = mean_ecliptic_obliquity(jme)
epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = aberration_correction(R)
lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = mean_sidereal_time(jd, jc)
v = apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = geocentric_sun_declination(lamd, epsilon, beta)
if sst:
return v, alpha, delta
m = sun_mean_longitude(jme)
eot = equation_of_time(m, alpha, delta_psi, epsilon)
H = local_hour_angle(v, lon, alpha)
xi = equatorial_horizontal_parallax(R)
u = uterm(lat)
x = xterm(u, lat, elev)
y = yterm(u, lat, elev)
delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha, H)
H_prime = topocentric_local_hour_angle(H, delta_alpha)
e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
H_prime)
delta_e = atmospheric_refraction_correction(pressure, temp, e0,
atmos_refract)
e = topocentric_elevation_angle(e0, delta_e)
theta = topocentric_zenith_angle(e)
theta0 = topocentric_zenith_angle(e0)
gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
phi = topocentric_azimuth_angle(gamma)
return theta, theta0, e, e0, phi, eot
def solar_position(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads=8, sst=False, esd=False):
"""
    Calculate the solar position using the NREL SPA algorithm described in
    [1]. If numba is installed, the helper functions are JIT-compiled and
    the code runs quickly; if not, they fall back to plain numpy.
Parameters
----------
unixtime : numpy array
Array of unix/epoch timestamps to calculate solar position for.
Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
lat : float
Latitude to calculate solar position for
lon : float
Longitude to calculate solar position for
elev : float
Elevation of location in meters
pressure : int or float
avg. yearly pressure at location in millibars;
used for atmospheric correction
temp : int or float
avg. yearly temperature at location in
degrees C; used for atmospheric correction
    delta_t : float, optional
        Difference between terrestrial time and UT1, in seconds.
        If delta_t is None, spa.calculate_deltat is used with time.year and
        time.month from the pandas.DatetimeIndex; by default that uses USNO
        historical data and predictions. For most simulations, specifying
        delta_t directly is sufficient.
        *Note: delta_t = None will break code using nrel_numba;
        this will be fixed in a future version.
    atmos_refract : float, optional
        The approximate atmospheric refraction (in degrees)
        at sunrise and sunset.
    numthreads : int, optional, default 8
        Number of threads to use for the computation if numba>=0.17
        is installed.
sst : bool, default False
If True, return only data needed for sunrise, sunset, and transit
calculations.
esd : bool, default False
If True, return only Earth-Sun distance in AU
Returns
-------
Numpy Array with elements:
apparent zenith,
zenith,
        apparent_elevation,
        elevation,
azimuth,
equation_of_time
References
----------
[1] I. Reda and A. Andreas, Solar position algorithm for solar radiation
applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
[2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
"""
if USE_NUMBA:
do_calc = solar_position_numba
else:
do_calc = solar_position_numpy
result = do_calc(unixtime, lat, lon, elev, pressure,
temp, delta_t, atmos_refract, numthreads,
sst, esd)
if not isinstance(result, np.ndarray):
try:
result = np.array(result)
except Exception:
pass
return result
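# Example (illustrative sketch; the location, pressure, temperature and
# delta_t values are hypothetical):
#   import pandas as pd
#   times = pd.date_range('2015-01-01', periods=24, freq='H', tz='UTC')
#   unixtime = times.astype(np.int64) / 10**9
#   theta, theta0, e, e0, phi, eot = solar_position(
#       unixtime, lat=40.0, lon=-105.0, elev=1650, pressure=820, temp=10,
#       delta_t=67.0, atmos_refract=0.5667)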
def transit_sunrise_sunset(dates, lat, lon, delta_t, numthreads):
"""
Calculate the sun transit, sunrise, and sunset
for a set of dates at a given location.
Parameters
----------
dates : array
Numpy array of ints/floats corresponding to the Unix time
for the dates of interest, must be midnight UTC (00:00+00:00)
on the day of interest.
lat : float
Latitude of location to perform calculation for
lon : float
Longitude of location
delta_t : float
Difference between terrestrial time and UT. USNO has tables.
numthreads : int
        Number of threads to use for the calculation (if using numba)
Returns
-------
tuple : (transit, sunrise, sunset) localized to UTC
"""
if ((dates % 86400) != 0.0).any():
raise ValueError('Input dates must be at 00:00 UTC')
utday = (dates // 86400) * 86400
ttday0 = utday - delta_t
ttdayn1 = ttday0 - 86400
ttdayp1 = ttday0 + 86400
# index 0 is v, 1 is alpha, 2 is delta
utday_res = solar_position(utday, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
v = utday_res[0]
ttday0_res = solar_position(ttday0, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
ttdayn1_res = solar_position(ttdayn1, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
ttdayp1_res = solar_position(ttdayp1, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
m0 = (ttday0_res[1] - lon - v) / 360
cos_arg = ((np.sin(np.radians(-0.8333)) - np.sin(np.radians(lat))
* np.sin(np.radians(ttday0_res[2]))) /
(np.cos(np.radians(lat)) * np.cos(np.radians(ttday0_res[2]))))
cos_arg[abs(cos_arg) > 1] = np.nan
H0 = np.degrees(np.arccos(cos_arg)) % 180
m = np.empty((3, len(utday)))
m[0] = m0 % 1
m[1] = (m[0] - H0 / 360)
m[2] = (m[0] + H0 / 360)
# need to account for fractions of day that may be the next or previous
# day in UTC
add_a_day = m[2] >= 1
sub_a_day = m[1] < 0
m[1] = m[1] % 1
m[2] = m[2] % 1
vs = v + 360.985647 * m
n = m + delta_t / 86400
a = ttday0_res[1] - ttdayn1_res[1]
a[abs(a) > 2] = a[abs(a) > 2] % 1
ap = ttday0_res[2] - ttdayn1_res[2]
ap[abs(ap) > 2] = ap[abs(ap) > 2] % 1
b = ttdayp1_res[1] - ttday0_res[1]
b[abs(b) > 2] = b[abs(b) > 2] % 1
bp = ttdayp1_res[2] - ttday0_res[2]
bp[abs(bp) > 2] = bp[abs(bp) > 2] % 1
c = b - a
cp = bp - ap
alpha_prime = ttday0_res[1] + (n * (a + b + c * n)) / 2
delta_prime = ttday0_res[2] + (n * (ap + bp + cp * n)) / 2
Hp = (vs + lon - alpha_prime) % 360
Hp[Hp >= 180] = Hp[Hp >= 180] - 360
h = np.degrees(np.arcsin(np.sin(np.radians(lat)) *
np.sin(np.radians(delta_prime)) +
np.cos(np.radians(lat)) *
np.cos(np.radians(delta_prime))
* np.cos(np.radians(Hp))))
T = (m[0] - Hp[0] / 360) * 86400
R = (m[1] + (h[1] + 0.8333) / (360 * np.cos(np.radians(delta_prime[1])) *
np.cos(np.radians(lat)) *
np.sin(np.radians(Hp[1])))) * 86400
S = (m[2] + (h[2] + 0.8333) / (360 * np.cos(np.radians(delta_prime[2])) *
np.cos(np.radians(lat)) *
np.sin(np.radians(Hp[2])))) * 86400
S[add_a_day] += 86400
R[sub_a_day] -= 86400
transit = T + utday
sunrise = R + utday
sunset = S + utday
return transit, sunrise, sunset
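# Example (illustrative sketch; coordinates and delta_t are hypothetical):
#   dates = np.array([1420070400.0])  # 2015-01-01 00:00 UTC
#   transit, sunrise, sunset = transit_sunrise_sunset(
#       dates, lat=40.0, lon=-105.0, delta_t=67.0, numthreads=1)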
def earthsun_distance(unixtime, delta_t, numthreads):
"""
Calculates the distance from the earth to the sun using the
NREL SPA algorithm described in [1].
Parameters
----------
unixtime : numpy array
Array of unix/epoch timestamps to calculate solar position for.
Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
delta_t : float
Difference between terrestrial time and UT. USNO has tables.
numthreads : int
        Number of threads to use for the calculation (if using numba)
Returns
-------
R : array
Earth-Sun distance in AU.
References
----------
[1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
    radiation applications. Technical report: NREL/TP-560-34302. Golden,
USA, http://www.nrel.gov.
"""
R = solar_position(unixtime, 0, 0, 0, 0, 0, delta_t,
0, numthreads, esd=True)[0]
return R
def calculate_deltat(year, month):
"""Calculate the difference between Terrestrial Dynamical Time (TD)
and Universal Time (UT).
    Note: This function is not yet compatible with numba-compiled
    calculations.
Equations taken from http://eclipse.gsfc.nasa.gov/SEcat5/deltatpoly.html
"""
plw = 'Deltat is unknown for years before -1999 and after 3000. ' \
'Delta values will be calculated, but the calculations ' \
'are not intended to be used for these years.'
try:
if np.any((year > 3000) | (year < -1999)):
warnings.warn(plw)
except ValueError:
if (year > 3000) | (year < -1999):
warnings.warn(plw)
except TypeError:
return 0
y = year + (month - 0.5)/12
deltat = np.where(year < -500,
-20+32*((y-1820)/100)**2, 0)
deltat = np.where((-500 <= year) & (year < 500),
10583.6-1014.41*(y/100)
+ 33.78311*(y/100)**2
- 5.952053*(y/100)**3
- 0.1798452*(y/100)**4
+ 0.022174192*(y/100)**5
+ 0.0090316521*(y/100)**6, deltat)
deltat = np.where((500 <= year) & (year < 1600),
1574.2-556.01*((y-1000)/100)
+ 71.23472*((y-1000)/100)**2
+ 0.319781*((y-1000)/100)**3
- 0.8503463*((y-1000)/100)**4
- 0.005050998*((y-1000)/100)**5
+ 0.0083572073*((y-1000)/100)**6, deltat)
deltat = np.where((1600 <= year) & (year < 1700),
120-0.9808*(y-1600)
- 0.01532*(y-1600)**2
+ (y-1600)**3/7129, deltat)
deltat = np.where((1700 <= year) & (year < 1800),
8.83+0.1603*(y-1700)
- 0.0059285*(y-1700)**2
+ 0.00013336*(y-1700)**3
- (y-1700)**4/1174000, deltat)
deltat = np.where((1800 <= year) & (year < 1860),
13.72-0.332447*(y-1800)
+ 0.0068612*(y-1800)**2
+ 0.0041116*(y-1800)**3
- 0.00037436*(y-1800)**4
+ 0.0000121272*(y-1800)**5
- 0.0000001699*(y-1800)**6
+ 0.000000000875*(y-1800)**7, deltat)
deltat = np.where((1860 <= year) & (year < 1900),
7.62+0.5737*(y-1860)
- 0.251754*(y-1860)**2
+ 0.01680668*(y-1860)**3
- 0.0004473624*(y-1860)**4
+ (y-1860)**5/233174, deltat)
deltat = np.where((1900 <= year) & (year < 1920),
-2.79+1.494119*(y-1900)
- 0.0598939*(y-1900)**2
+ 0.0061966*(y-1900)**3
- 0.000197*(y-1900)**4, deltat)
deltat = np.where((1920 <= year) & (year < 1941),
21.20+0.84493*(y-1920)
- 0.076100*(y-1920)**2
+ 0.0020936*(y-1920)**3, deltat)
deltat = np.where((1941 <= year) & (year < 1961),
29.07+0.407*(y-1950)
- (y-1950)**2/233
+ (y-1950)**3/2547, deltat)
deltat = np.where((1961 <= year) & (year < 1986),
45.45+1.067*(y-1975)
- (y-1975)**2/260
- (y-1975)**3/718, deltat)
deltat = np.where((1986 <= year) & (year < 2005),
63.86+0.3345*(y-2000)
- 0.060374*(y-2000)**2
+ 0.0017275*(y-2000)**3
+ 0.000651814*(y-2000)**4
+ 0.00002373599*(y-2000)**5, deltat)
deltat = np.where((2005 <= year) & (year < 2050),
62.92+0.32217*(y-2000)
+ 0.005589*(y-2000)**2, deltat)
deltat = np.where((2050 <= year) & (year < 2150),
-20+32*((y-1820)/100)**2
- 0.5628*(2150-y), deltat)
deltat = np.where(year >= 2150,
-20+32*((y-1820)/100)**2, deltat)
    deltat = deltat.item() if np.isscalar(year) and np.isscalar(month) \
        else deltat
return deltat
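# Example (illustrative): for January 2015 the 2005-2050 polynomial above
# evaluates to roughly
#   calculate_deltat(2015, 1)  # ~= 69.0 seconds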
|
bsd-3-clause
|
winklerand/pandas
|
asv_bench/benchmarks/binary_ops.py
|
5
|
4124
|
import numpy as np
from pandas import DataFrame, Series, date_range
from pandas.core.algorithms import checked_add_with_arr
try:
import pandas.core.computation.expressions as expr
except ImportError:
import pandas.computation.expressions as expr
from .pandas_vb_common import setup # noqa
class Ops(object):
goal_time = 0.2
params = [[True, False], ['default', 1]]
param_names = ['use_numexpr', 'threads']
def setup(self, use_numexpr, threads):
self.df = DataFrame(np.random.randn(20000, 100))
self.df2 = DataFrame(np.random.randn(20000, 100))
if threads != 'default':
expr.set_numexpr_threads(threads)
if not use_numexpr:
expr.set_use_numexpr(False)
def time_frame_add(self, use_numexpr, threads):
self.df + self.df2
def time_frame_mult(self, use_numexpr, threads):
self.df * self.df2
def time_frame_multi_and(self, use_numexpr, threads):
self.df[(self.df > 0) & (self.df2 > 0)]
def time_frame_comparison(self, use_numexpr, threads):
self.df > self.df2
def teardown(self, use_numexpr, threads):
expr.set_use_numexpr(True)
expr.set_numexpr_threads()
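# asv calls setup() for every (use_numexpr, threads) combination before timing
# each time_* method and calls teardown() afterwards, so the global numexpr
# settings changed above are restored between benchmarks.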
class Ops2(object):
goal_time = 0.2
def setup(self):
N = 10**3
self.df = DataFrame(np.random.randn(N, N))
self.df2 = DataFrame(np.random.randn(N, N))
self.df_int = DataFrame(np.random.randint(np.iinfo(np.int16).min,
np.iinfo(np.int16).max,
size=(N, N)))
self.df2_int = DataFrame(np.random.randint(np.iinfo(np.int16).min,
np.iinfo(np.int16).max,
size=(N, N)))
# Division
def time_frame_float_div(self):
self.df // self.df2
def time_frame_float_div_by_zero(self):
self.df / 0
def time_frame_float_floor_by_zero(self):
self.df // 0
def time_frame_int_div_by_zero(self):
self.df_int / 0
# Modulo
def time_frame_int_mod(self):
self.df_int % self.df2_int
def time_frame_float_mod(self):
self.df % self.df2
class Timeseries(object):
goal_time = 0.2
params = [None, 'US/Eastern']
param_names = ['tz']
def setup(self, tz):
N = 10**6
halfway = (N // 2) - 1
self.s = Series(date_range('20010101', periods=N, freq='T', tz=tz))
self.ts = self.s[halfway]
self.s2 = Series(date_range('20010101', periods=N, freq='s', tz=tz))
def time_series_timestamp_compare(self, tz):
self.s <= self.ts
def time_timestamp_series_compare(self, tz):
self.ts >= self.s
def time_timestamp_ops_diff(self, tz):
self.s2.diff()
def time_timestamp_ops_diff_with_shift(self, tz):
self.s - self.s.shift()
class AddOverflowScalar(object):
goal_time = 0.2
params = [1, -1, 0]
param_names = ['scalar']
def setup(self, scalar):
N = 10**6
self.arr = np.arange(N)
def time_add_overflow_scalar(self, scalar):
checked_add_with_arr(self.arr, scalar)
class AddOverflowArray(object):
goal_time = 0.2
def setup(self):
N = 10**6
self.arr = np.arange(N)
self.arr_rev = np.arange(-N, 0)
        self.arr_mixed = np.array([1, -1]).repeat(N // 2)
self.arr_nan_1 = np.random.choice([True, False], size=N)
self.arr_nan_2 = np.random.choice([True, False], size=N)
def time_add_overflow_arr_rev(self):
checked_add_with_arr(self.arr, self.arr_rev)
def time_add_overflow_arr_mask_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed, arr_mask=self.arr_nan_1)
def time_add_overflow_b_mask_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed,
b_mask=self.arr_nan_1)
def time_add_overflow_both_arg_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed, arr_mask=self.arr_nan_1,
b_mask=self.arr_nan_2)
|
bsd-3-clause
|
jasonmccampbell/scipy-refactor
|
doc/source/tutorial/examples/newton_krylov_preconditioning.py
|
7
|
2444
|
import numpy as np
from scipy.optimize import newton_krylov
from scipy.sparse import spdiags, spkron
from scipy.sparse.linalg import spilu, LinearOperator
from numpy import cosh, zeros_like, mgrid, zeros, eye
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def get_preconditioner():
"""Compute the preconditioner M"""
diags_x = zeros((3, nx))
diags_x[0,:] = 1/hx/hx
diags_x[1,:] = -2/hx/hx
diags_x[2,:] = 1/hx/hx
Lx = spdiags(diags_x, [-1,0,1], nx, nx)
diags_y = zeros((3, ny))
diags_y[0,:] = 1/hy/hy
diags_y[1,:] = -2/hy/hy
diags_y[2,:] = 1/hy/hy
Ly = spdiags(diags_y, [-1,0,1], ny, ny)
J1 = spkron(Lx, eye(ny)) + spkron(eye(nx), Ly)
# Now we have the matrix `J_1`. We need to find its inverse `M` --
# however, since an approximate inverse is enough, we can use
# the *incomplete LU* decomposition
J1_ilu = spilu(J1)
# This returns an object with a method .solve() that evaluates
# the corresponding matrix-vector product. We need to wrap it into
# a LinearOperator before it can be passed to the Krylov methods:
M = LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve)
return M
def solve(preconditioning=True):
"""Compute the solution"""
count = [0]
def residual(P):
count[0] += 1
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2])/hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y + 5*cosh(P).mean()**2
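    # residual() discretizes the nonlinear boundary value problem
    #   d2P/dx2 + d2P/dy2 + 5 * cosh(P).mean()**2 = 0
    # on the unit square with second-order central differences, using the
    # fixed values P_left, P_right, P_bottom and P_top on the four edges.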
# preconditioner
if preconditioning:
M = get_preconditioner()
else:
M = None
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, verbose=1, inner_M=M)
    print('Residual: %g' % abs(residual(sol)).max())
    print('Evaluations: %d' % count[0])
return sol
def main():
sol = solve(preconditioning=True)
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.clf()
plt.pcolor(x, y, sol)
plt.clim(0, 1)
plt.colorbar()
plt.show()
if __name__ == "__main__":
main()
|
bsd-3-clause
|
bikash/kaggleCompetition
|
microsoft malware/Malware_Say_No_To_Overfitting/kaggle_Microsoft_malware_small/semi_best_gen.py
|
1
|
7097
|
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.linear_model import LogisticRegression as LGR
from sklearn.ensemble import GradientBoostingClassifier as GBC
from sklearn.ensemble import ExtraTreesClassifier as ET
from xgboost_multi import XGBC
from sklearn import cross_validation
from sklearn.cross_validation import StratifiedKFold as KFold
from sklearn.metrics import log_loss
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfTransformer
import pickle
from sklearn.linear_model import SGDClassifier as SGD
# create model_list
def get_model_list():
model_list = []
for num_round in [200]:
for max_depth in [2]:
for eta in [0.25]:
for min_child_weight in [2]:
for col_sample in [1]:
model_list.append((XGBC(num_round = num_round, max_depth = max_depth, eta = eta,
min_child_weight = min_child_weight, colsample_bytree = col_sample),
'xgb_tree_%i_depth_%i_lr_%f_child_%i_col_sample_%i'%(num_round, max_depth, eta, min_child_weight,col_sample)))
return model_list
def gen_data():
# the 4k features!
the_train = pickle.load(open('X33_train_reproduce.p','rb'))
the_test = pickle.load(open('X33_test_reproduce.p','rb'))
# corresponding id and labels
Id = pickle.load(open('xid.p','rb'))
labels = pickle.load(open('y.p','rb'))
Id_test = pickle.load(open('Xt_id.p','rb'))
# merge them into pandas
join_train = np.column_stack((Id, the_train, labels))
join_test = np.column_stack((Id_test, the_test))
train = pd.DataFrame(join_train, columns=['Id']+['the_fea%i'%i for i in xrange(the_train.shape[1])] + ['Class'])
test = pd.DataFrame(join_test, columns=['Id']+['the_fea%i'%i for i in xrange(the_train.shape[1])])
del join_train, join_test
# convert into numeric features
train = train.convert_objects(convert_numeric=True)
test = test.convert_objects(convert_numeric=True)
# including more things
train_count = pd.read_csv("train_frequency.csv")
test_count = pd.read_csv("test_frequency.csv")
train = pd.merge(train, train_count, on='Id')
test = pd.merge(test, test_count, on='Id')
# instr count
train_instr_count = pd.read_csv("train_instr_frequency.csv")
test_instr_count = pd.read_csv("test_instr_frequency.csv")
for n in list(train_instr_count)[1:]:
if np.sum(train_instr_count[n]) == 0:
del train_instr_count[n]
del test_instr_count[n]
train_instr_freq = train_instr_count.copy()
test_instr_freq = test_instr_count.copy()
train_instr_freq.ix[:,1:] = train_instr_freq.ix[:,1:].apply(lambda x: x/np.sum(x), axis = 1)
train_instr_freq = train_instr_freq.replace(np.nan, 0)
test_instr_freq.ix[:,1:]=test_instr_freq.ix[:,1:].apply(lambda x: x/np.sum(x), axis = 1)
test_instr_freq = test_instr_freq.replace(np.nan, 0)
train = pd.merge(train, train_instr_freq, on='Id')
test = pd.merge(test, test_instr_freq, on='Id')
## all right, include more!
grams_train = pd.read_csv("train_data_750.csv")
grams_test = pd.read_csv("test_data_750.csv")
# daf features
#train_daf = pd.read_csv("train_daf.csv")
#test_daf = pd.read_csv("test_daf.csv")
#daf_list = [0,165,91,60,108,84,42,93,152,100] #daf list for 500 grams.
# dll features
train_dll = pd.read_csv("train_dll.csv")
test_dll = pd.read_csv("test_dll.csv")
# merge all them
#mine = pd.merge(grams_train, train_daf,on='Id')
mine = grams_train
mine = pd.merge(mine, train_dll, on='Id')
mine_labels = pd.read_csv("trainLabels.csv")
mine = pd.merge(mine, mine_labels, on='Id')
mine_labels = mine.Class
mine_Id = mine.Id
del mine['Class']
del mine['Id']
mine = mine.as_matrix()
#mine_test = pd.merge(grams_test, test_daf,on='Id')
mine_test = grams_test
mine_test = pd.merge(mine_test, test_dll,on='Id')
mine_test_id = mine_test.Id
del mine_test['Id']
clf_se = RF(n_estimators=500, n_jobs=-1,random_state = 0)
clf_se.fit(mine,mine_labels)
mine_train = np.array(clf_se.transform(mine, '1.25*mean'))
mine_test = np.array(clf_se.transform(mine_test, '1.25*mean'))
train_mine = pd.DataFrame(np.column_stack((mine_Id, mine_train)), columns=['Id']+['mine_'+str(x) for x in xrange(mine_train.shape[1])]).convert_objects(convert_numeric=True)
test_mine = pd.DataFrame(np.column_stack((mine_test_id, mine_test)), columns=['Id']+['mine_'+str(x) for x in xrange(mine_test.shape[1])]).convert_objects(convert_numeric=True)
train = pd.merge(train, train_mine, on='Id')
test = pd.merge(test, test_mine, on='Id')
train_image = pd.read_csv("train_asm_image.csv", usecols=['Id']+['asm_%i'%i for i in xrange(800)])
test_image = pd.read_csv("test_asm_image.csv", usecols=['Id']+['asm_%i'%i for i in xrange(800)])
train = pd.merge(train, train_image, on='Id')
test = pd.merge(test, test_image, on='Id')
semi_labels = pd.read_csv('ensemble.csv')
semi_id = semi_labels.Id
semi_labels = np.array([int(x[-1]) for x in semi_labels.ix[:,1:].idxmax(1)])
semi_labels = np.column_stack((semi_id, semi_labels))
semi_labels = pd.DataFrame(semi_labels, columns = ['Id','Class']).convert_objects(convert_numeric=True)
test = pd.merge(test, semi_labels, on='Id', how='inner')
print train.shape, test.shape
return train, test
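# semi_learning() below is a simple pseudo-labelling scheme: the test set is
# labelled with the argmax of a previous ensemble submission (ensemble.csv,
# merged in gen_data above), split into 4 folds, and each fold is predicted by
# a model trained on the full training set plus the other three
# pseudo-labelled folds.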
def semi_learning(model_list):
# read in data
print "read data and prepare modelling..."
train, test = gen_data()
X = train
Id = X.Id
labels = X.Class - 1 # for the purpose of using multilogloss fun.
del X['Id']
del X['Class']
X = X.as_matrix()
X_test = test
id_test = X_test.Id
labels_test = X_test.Class - 1
del X_test['Id']
del X_test['Class']
X_test = X_test.as_matrix()
kf = KFold(labels_test, n_folds=4) # 4 folds
for j, (clf, clf_name) in enumerate(model_list):
print "modelling %s"%clf_name
stack_train = np.zeros((len(id_test),9)) # 9 classes.
for i, (train_fold, validate) in enumerate(kf):
X_train, X_validate, labels_train, labels_validate = X_test[train_fold,:], X_test[validate,:], labels_test[train_fold], labels_test[validate]
X_train = np.concatenate((X, X_train))
labels_train = np.concatenate((labels, labels_train))
clf.fit(X_train,labels_train)
stack_train[validate] = clf.predict_proba(X_validate).reshape(X_validate.shape[0],9)
pred = np.column_stack((id_test, stack_train))
submission = pd.DataFrame(pred, columns=['Id']+['Prediction%i'%i for i in xrange(1,10)])
submission = submission.convert_objects(convert_numeric=True)
submission.to_csv('best_submission.csv',index = False)
if __name__ == '__main__':
model_list = get_model_list()
semi_learning(model_list)
print "ALL DONE!!!"
|
apache-2.0
|
Lstyle1/Deep_learning_projects
|
weight-initialization/helper.py
|
153
|
3649
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values)//2))
plt.show()
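# Example (illustrative sketch; assumes the TF 1.x API used above):
#   hist_dist('Truncated Normal', tf.truncated_normal([1000]))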
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
    assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
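# Example (illustrative sketch; `mnist` stands for a dataset object with
# .train/.validation splits such as the old TF MNIST loader, and the layer
# sizes mirror _get_loss_acc above):
#   weight_init_list = [
#       ([tf.Variable(tf.truncated_normal([784, 256])),
#         tf.Variable(tf.truncated_normal([256, 128])),
#         tf.Variable(tf.truncated_normal([128, 10]))],
#        'Truncated Normal')]
#   compare_init_weights(mnist, 'Truncated Normal', weight_init_list)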
|
mit
|
schets/scikit-learn
|
sklearn/metrics/cluster/__init__.py
|
312
|
1322
|
"""
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not use ground truth and measures the 'quality' of
  the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
|
bsd-3-clause
|
wdbm/abstraction
|
setup.py
|
1
|
2157
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import setuptools
def main():
setuptools.setup(
name = 'abstraction',
version = '2018.01.11.2110',
description = 'machine learning framework',
long_description = long_description(),
url = 'https://github.com/wdbm/abstraction',
author = 'Will Breaden Madden',
author_email = '[email protected]',
license = 'GPLv3',
py_modules = [
'abstraction'
],
install_requires = [
'beautifulsoup4',
'dataset',
'datavision',
'docopt',
'flask',
'gpudeets',
'jupyter',
'lxml',
'numpy',
'matplotlib',
'praw',
'propyte',
'pyprel',
'seaborn',
'shijian',
'sklearn',
'technicolor',
'tmux_control',
'tonescale'
],
scripts = [
'abstraction_generate_response.py',
'abstraction_interface.py'
],
entry_points = """
[console_scripts]
abstraction = abstraction:abstraction
"""
)
def long_description(
filename = 'README.md'
):
if os.path.isfile(os.path.expandvars(filename)):
try:
import pypandoc
long_description = pypandoc.convert_file(filename, 'rst')
except ImportError:
long_description = open(filename).read()
else:
long_description = ''
return long_description
if __name__ == '__main__':
main()
|
gpl-3.0
|
dssg/wikienergy
|
disaggregator/build/pandas/pandas/io/tests/test_data.py
|
1
|
17790
|
from __future__ import print_function
from pandas import compat
import warnings
import nose
from nose.tools import assert_equal
from datetime import datetime
import os
import numpy as np
import pandas as pd
from pandas import DataFrame, Timestamp
from pandas.io import data as web
from pandas.io.data import DataReader, SymbolWarning, RemoteDataError, _yahoo_codes
from pandas.util.testing import (assert_series_equal, assert_produces_warning,
network, assert_frame_equal)
import pandas.util.testing as tm
from numpy.testing import assert_array_equal
if compat.PY3:
from urllib.error import HTTPError
else:
from urllib2 import HTTPError
def _skip_if_no_lxml():
try:
import lxml
except ImportError:
raise nose.SkipTest("no lxml")
def assert_n_failed_equals_n_null_columns(wngs, obj, cls=SymbolWarning):
all_nan_cols = pd.Series(dict((k, pd.isnull(v).all()) for k, v in
compat.iteritems(obj)))
n_all_nan_cols = all_nan_cols.sum()
valid_warnings = pd.Series([wng for wng in wngs if isinstance(wng, cls)])
assert_equal(len(valid_warnings), n_all_nan_cols)
failed_symbols = all_nan_cols[all_nan_cols].index
msgs = valid_warnings.map(lambda x: x.message)
assert msgs.str.contains('|'.join(failed_symbols)).all()
class TestGoogle(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestGoogle, cls).setUpClass()
cls.locales = tm.get_locales(prefix='en_US')
if not cls.locales:
raise nose.SkipTest("US English locale not available for testing")
@classmethod
def tearDownClass(cls):
super(TestGoogle, cls).tearDownClass()
del cls.locales
@network
def test_google(self):
# asserts that google is minimally working and that it throws
# an exception when DataReader can't get a 200 response from
# google
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
for locale in self.locales:
with tm.set_locale(locale):
panel = web.DataReader("F", 'google', start, end)
self.assertEqual(panel.Close[-1], 13.68)
self.assertRaises(Exception, web.DataReader, "NON EXISTENT TICKER",
'google', start, end)
@network
def test_get_quote_fails(self):
self.assertRaises(NotImplementedError, web.get_quote_google,
pd.Series(['GOOG', 'AAPL', 'GOOG']))
@network
def test_get_goog_volume(self):
for locale in self.locales:
with tm.set_locale(locale):
df = web.get_data_google('GOOG').sort_index()
self.assertEqual(df.Volume.ix['OCT-08-2010'], 2863473)
@network
def test_get_multi1(self):
for locale in self.locales:
sl = ['AAPL', 'AMZN', 'GOOG']
with tm.set_locale(locale):
pan = web.get_data_google(sl, '2012')
ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG]
if (hasattr(pan, 'Close') and hasattr(pan.Close, 'GOOG') and
hasattr(pan.Close, 'AAPL')):
self.assertEqual(ts[0].dayofyear, 96)
else:
self.assertRaises(AttributeError, lambda: pan.Close)
@network
def test_get_multi_invalid(self):
sl = ['AAPL', 'AMZN', 'INVALID']
pan = web.get_data_google(sl, '2012')
self.assertIn('INVALID', pan.minor_axis)
@network
def test_get_multi2(self):
with warnings.catch_warnings(record=True) as w:
for locale in self.locales:
with tm.set_locale(locale):
pan = web.get_data_google(['GE', 'MSFT', 'INTC'],
'JAN-01-12', 'JAN-31-12')
result = pan.Close.ix['01-18-12']
assert_n_failed_equals_n_null_columns(w, result)
# sanity checking
assert np.issubdtype(result.dtype, np.floating)
result = pan.Open.ix['Jan-15-12':'Jan-20-12']
self.assertEqual((4, 3), result.shape)
assert_n_failed_equals_n_null_columns(w, result)
def test_dtypes(self):
#GH3995, #GH8980
data = web.get_data_google('F', start='JAN-01-10', end='JAN-27-13')
assert np.issubdtype(data.Open.dtype, np.number)
assert np.issubdtype(data.Close.dtype, np.number)
assert np.issubdtype(data.Low.dtype, np.number)
assert np.issubdtype(data.High.dtype, np.number)
assert np.issubdtype(data.Volume.dtype, np.number)
class TestYahoo(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestYahoo, cls).setUpClass()
_skip_if_no_lxml()
@network
def test_yahoo(self):
# asserts that yahoo is minimally working and that it throws
# an exception when DataReader can't get a 200 response from
# yahoo
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
self.assertEqual(web.DataReader("F", 'yahoo', start, end)['Close'][-1],
13.68)
@network
def test_yahoo_fails(self):
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
self.assertRaises(Exception, web.DataReader, "NON EXISTENT TICKER",
'yahoo', start, end)
@network
def test_get_quote_series(self):
df = web.get_quote_yahoo(pd.Series(['GOOG', 'AAPL', 'GOOG']))
assert_series_equal(df.ix[0], df.ix[2])
@network
def test_get_quote_string(self):
df = web.get_quote_yahoo('GOOG')
    @network
    def test_get_quote_string_with_custom_code(self):
        # exercises get_quote_yahoo with an extra field code registered in
        # _yahoo_codes (distinct name so it does not shadow the test above)
_yahoo_codes.update({'MarketCap': 'j1'})
df = web.get_quote_yahoo('GOOG')
self.assertFalse(pd.isnull(df['MarketCap'][0]))
@network
def test_get_quote_stringlist(self):
df = web.get_quote_yahoo(['GOOG', 'AAPL', 'GOOG'])
assert_series_equal(df.ix[0], df.ix[2])
@network
def test_get_components_dow_jones(self):
raise nose.SkipTest('unreliable test, receive partial components back for dow_jones')
df = web.get_components_yahoo('^DJI') #Dow Jones
assert isinstance(df, pd.DataFrame)
self.assertEqual(len(df), 30)
@network
def test_get_components_dax(self):
raise nose.SkipTest('unreliable test, receive partial components back for dax')
df = web.get_components_yahoo('^GDAXI') #DAX
assert isinstance(df, pd.DataFrame)
self.assertEqual(len(df), 30)
self.assertEqual(df[df.name.str.contains('adidas', case=False)].index,
'ADS.DE')
@network
def test_get_components_nasdaq_100(self):
# as of 7/12/13 the conditional will test false because the link is invalid
raise nose.SkipTest('unreliable test, receive partial components back for nasdaq_100')
df = web.get_components_yahoo('^NDX') #NASDAQ-100
assert isinstance(df, pd.DataFrame)
if len(df) > 1:
# Usual culprits, should be around for a while
assert 'AAPL' in df.index
assert 'GOOG' in df.index
assert 'AMZN' in df.index
else:
expected = DataFrame({'exchange': 'N/A', 'name': '@^NDX'},
index=['@^NDX'])
assert_frame_equal(df, expected)
@network
def test_get_data_single_symbol(self):
#single symbol
#http://finance.yahoo.com/q/hp?s=GOOG&a=09&b=08&c=2010&d=09&e=10&f=2010&g=d
# just test that we succeed
web.get_data_yahoo('GOOG')
@network
def test_get_data_multiple_symbols(self):
# just test that we succeed
sl = ['AAPL', 'AMZN', 'GOOG']
web.get_data_yahoo(sl, '2012')
@network
def test_get_data_multiple_symbols_two_dates(self):
pan = web.get_data_yahoo(['GE', 'MSFT', 'INTC'], 'JAN-01-12',
'JAN-31-12')
result = pan.Close.ix['01-18-12']
self.assertEqual(len(result), 3)
# sanity checking
assert np.issubdtype(result.dtype, np.floating)
expected = np.array([[18.99, 28.4, 25.18],
[18.58, 28.31, 25.13],
[19.03, 28.16, 25.52],
[18.81, 28.82, 25.87]])
result = pan.Open.ix['Jan-15-12':'Jan-20-12']
self.assertEqual(expected.shape, result.shape)
@network
def test_get_date_ret_index(self):
pan = web.get_data_yahoo(['GE', 'INTC', 'IBM'], '1977', '1987',
ret_index=True)
self.assertTrue(hasattr(pan, 'Ret_Index'))
if hasattr(pan, 'Ret_Index') and hasattr(pan.Ret_Index, 'INTC'):
tstamp = pan.Ret_Index.INTC.first_valid_index()
result = pan.Ret_Index.ix[tstamp]['INTC']
self.assertEqual(result, 1.0)
# sanity checking
assert np.issubdtype(pan.values.dtype, np.floating)
class TestYahooOptions(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestYahooOptions, cls).setUpClass()
_skip_if_no_lxml()
# aapl has monthlies
cls.aapl = web.Options('aapl', 'yahoo')
today = datetime.today()
cls.year = today.year
cls.month = today.month + 1
if cls.month > 12:
cls.year = cls.year + 1
cls.month = 1
cls.expiry = datetime(cls.year, cls.month, 1)
cls.dirpath = tm.get_data_path()
cls.html1 = os.path.join(cls.dirpath, 'yahoo_options1.html')
cls.html2 = os.path.join(cls.dirpath, 'yahoo_options2.html')
cls.data1 = cls.aapl._option_frames_from_url(cls.html1)['puts']
@classmethod
def tearDownClass(cls):
super(TestYahooOptions, cls).tearDownClass()
del cls.aapl, cls.expiry
@network
def test_get_options_data(self):
# regression test GH6105
self.assertRaises(ValueError, self.aapl.get_options_data, month=3)
self.assertRaises(ValueError, self.aapl.get_options_data, year=1992)
try:
options = self.aapl.get_options_data(expiry=self.expiry)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(options) > 1)
@network
def test_get_near_stock_price(self):
try:
options = self.aapl.get_near_stock_price(call=True, put=True,
expiry=self.expiry)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(options) > 1)
@network
def test_get_call_data(self):
try:
calls = self.aapl.get_call_data(expiry=self.expiry)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(calls) > 1)
@network
def test_get_put_data(self):
try:
puts = self.aapl.get_put_data(expiry=self.expiry)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(puts) > 1)
@network
def test_get_expiry_dates(self):
try:
dates, _ = self.aapl._get_expiry_dates_and_links()
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(dates) > 1)
@network
def test_get_all_data(self):
try:
data = self.aapl.get_all_data(put=True)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(data) > 1)
@network
def test_get_data_with_list(self):
try:
data = self.aapl.get_call_data(expiry=self.aapl.expiry_dates)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(data) > 1)
@network
def test_get_all_data_calls_only(self):
try:
data = self.aapl.get_all_data(call=True, put=False)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(data) > 1)
@network
def test_sample_page_price_quote_time1(self):
#Tests the weekend quote time format
price, quote_time = self.aapl._get_underlying_price(self.html1)
self.assertIsInstance(price, (int, float, complex))
self.assertIsInstance(quote_time, (datetime, Timestamp))
def test_chop(self):
#regression test for #7625
self.aapl.chop_data(self.data1, above_below=2, underlying_price=np.nan)
chopped = self.aapl.chop_data(self.data1, above_below=2, underlying_price=100)
self.assertIsInstance(chopped, DataFrame)
self.assertTrue(len(chopped) > 1)
def test_chop_out_of_strike_range(self):
#regression test for #7625
self.aapl.chop_data(self.data1, above_below=2, underlying_price=np.nan)
chopped = self.aapl.chop_data(self.data1, above_below=2, underlying_price=100000)
self.assertIsInstance(chopped, DataFrame)
self.assertTrue(len(chopped) > 1)
@network
def test_sample_page_price_quote_time2(self):
#Tests the EDT page format
#regression test for #8741
price, quote_time = self.aapl._get_underlying_price(self.html2)
self.assertIsInstance(price, (int, float, complex))
self.assertIsInstance(quote_time, (datetime, Timestamp))
@network
def test_sample_page_chg_float(self):
        #Tests that numeric columns with commas are appropriately dealt with
self.assertEqual(self.data1['Chg'].dtype, 'float64')
@network
def test_month_year(self):
try:
data = self.aapl.get_call_data(month=self.month, year=self.year)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(data) > 1)
class TestOptionsWarnings(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestOptionsWarnings, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestOptionsWarnings, cls).tearDownClass()
@network
def test_options_source_warning(self):
with assert_produces_warning():
aapl = web.Options('aapl')
class TestDataReader(tm.TestCase):
def test_is_s3_url(self):
from pandas.io.common import _is_s3_url
self.assertTrue(_is_s3_url("s3://pandas/somethingelse.com"))
@network
def test_read_yahoo(self):
gs = DataReader("GS", "yahoo")
assert isinstance(gs, DataFrame)
@network
def test_read_google(self):
gs = DataReader("GS", "google")
assert isinstance(gs, DataFrame)
@network
def test_read_fred(self):
vix = DataReader("VIXCLS", "fred")
assert isinstance(vix, DataFrame)
@network
def test_read_famafrench(self):
for name in ("F-F_Research_Data_Factors",
"F-F_Research_Data_Factors_weekly", "6_Portfolios_2x3",
"F-F_ST_Reversal_Factor", "F-F_Momentum_Factor"):
ff = DataReader(name, "famafrench")
assert ff
assert isinstance(ff, dict)
class TestFred(tm.TestCase):
@network
def test_fred(self):
# Throws an exception when DataReader can't get a 200 response from
# FRED.
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
received = web.DataReader("GDP", "fred", start, end)['GDP'].tail(1)[0]
# < 7/30/14 16535 was returned
#self.assertEqual(int(received), 16535)
self.assertEqual(int(received), 16502)
self.assertRaises(Exception, web.DataReader, "NON EXISTENT SERIES",
'fred', start, end)
@network
def test_fred_nan(self):
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
df = web.DataReader("DFII5", "fred", start, end)
assert pd.isnull(df.ix['2010-01-01'][0])
@network
def test_fred_parts(self):
raise nose.SkipTest('buggy as of 2/18/14; maybe a data revision?')
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
df = web.get_data_fred("CPIAUCSL", start, end)
self.assertEqual(df.ix['2010-05-01'][0], 217.23)
t = df.CPIAUCSL.values
assert np.issubdtype(t.dtype, np.floating)
self.assertEqual(t.shape, (37,))
@network
def test_fred_part2(self):
expected = [[576.7],
[962.9],
[684.7],
[848.3],
[933.3]]
result = web.get_data_fred("A09024USA144NNBR", start="1915").ix[:5]
assert_array_equal(result.values, np.array(expected))
@network
def test_invalid_series(self):
name = "NOT A REAL SERIES"
self.assertRaises(Exception, web.get_data_fred, name)
@network
def test_fred_multi(self):
raise nose.SkipTest('buggy as of 2/18/14; maybe a data revision?')
names = ['CPIAUCSL', 'CPALTT01USQ661S', 'CPILFESL']
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
received = web.DataReader(names, "fred", start, end).head(1)
expected = DataFrame([[217.478, 0.99701529, 220.544]], columns=names,
index=[pd.tslib.Timestamp('2010-01-01 00:00:00')])
expected.index.rename('DATE', inplace=True)
assert_frame_equal(received, expected, check_less_precise=True)
@network
def test_fred_multi_bad_series(self):
names = ['NOTAREALSERIES', 'CPIAUCSL', "ALSO FAKE"]
with tm.assertRaises(HTTPError):
DataReader(names, data_source="fred")
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
mit
|
sequana/sequana
|
sequana/modules_report/summary.py
|
1
|
12764
|
# coding: utf-8
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Dimitri Desvillechabrol <[email protected]>,
# <[email protected]>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Module to write summary.html have all information about the pipeline and
to visit other analysis"""
import os
from sequana.lazy import pandas as pd
import easydev
from sequana.modules_report.base_module import SequanaBaseModule
from sequana.utils import config
from sequana.utils.datatables_js import DataTable
from sequana.snaketools import SnakeMakeStats
class SummaryBase(SequanaBaseModule):
def __init__(self, required_dir=None):
super(SummaryBase, self).__init__(required_dir=required_dir)
def running_stats(self):
""" Barplot that shows computing time of each rule.
"""
try:
stats = SnakeMakeStats(self.json['stats'])
except KeyError:
return
png = self.create_embedded_png(stats.plot_and_save, 'filename',
outputdir=None)
l, c = self.create_hide_section('Stats', 'collapse/expand', png, True)
self.sections.append({
'name': "Running Stats {0}".format(
self.add_float_right('<small>{0}</small>'.format(l))
),
'anchor': 'stats',
'content': c
})
def dependencies(self):
""" Table with all python dependencies and a text file with tools
needed and their versions.
"""
html_table = self.get_table_dependencies()
pypi = self.create_link('Pypi', 'http://pypi.python.org')
try:
if 'requirements' in self.json:
req = self.copy_file(self.json['requirements'], 'inputs')
else:
raise Exception
except:
try :
req = self.json['requirements']
except:
return
req = self.create_link('requirements', req)
content = ("<p>Dependencies downloaded from bioconda "
"<b>{2}</b></p>"
"<p>Python dependencies (<b>{0}</b>){1}</p>".format(
pypi, html_table, req))
l, c = self.create_hide_section('Dep', 'collapse/expand', content,
hide=True)
self.sections.append({
'name': "Dependencies {0}".format(
self.add_float_right('<small>{0}</small>'.format(l))
),
'anchor': 'dependencies',
'content': c
})
def get_table_dependencies(self):
""" Return dependencies of Sequana.
"""
dep_list = easydev.get_dependencies('sequana')
# if installed with conda, this will be empty
if len(dep_list) == 0:
return ""
project_name = list()
version = list()
link = list()
pypi = 'https://pypi.python.org/pypi/{0}'
for dep in dep_list:
version.append(dep.version)
project_name.append(dep.project_name)
link.append(pypi.format(dep.project_name))
df = pd.DataFrame({'package': project_name, 'version': version,
'link': link})
df['sort'] = df['package'].str.lower()
df.sort_values(by='sort', axis=0, inplace=True)
df.drop('sort', axis=1, inplace=True)
datatable = DataTable(df, 'dep')
datatable.datatable.datatable_options = {'paging': 'false',
'bFilter': 'false',
'bInfo': 'false',
'bSort': 'false'}
datatable.datatable.set_links_to_column('link', 'package')
js = datatable.create_javascript_function()
html = datatable.create_datatable()
return js + '\n' + html
class SummaryModule(SummaryBase):
""" Write summary HTML report of an analysis. It contains all information
about the pipeline used, input/output files and version of software.
"""
def __init__(self, data, intro="", output_filename="summary.html"):
"""
"""
super().__init__()
self.json = data
for this in ["inputs", "outputs"]:
assert this in self.json
self.title = "Sequana Report Summary"
self.intro = intro
self.create_report_content()
self.create_html(output_filename)
def create_report_content(self):
""" Create the report content.
"""
self.sections = list()
if self.json['inputs']:
self.pipeline_inputs()
if self.json["outputs"]:
self.pipeline_outputs()
if self.json['html']:
self.pipeline_html()
for section in config.summary_sections:
self.sections.append(section)
self.workflow()
self.running_stats()
self.dependencies()
def pipeline_inputs(self):
""" Links corresponding to the analysed input files.
"""
# copy inputs in the input directory
input_dir = "inputs"
inputs = [self.copy_file(i, input_dir) for i in self.json['inputs']]
# create links list
html_list = '<li>{0}</li>'
links = [html_list.format(self.create_link(os.path.basename(i), i,
newtab=False, download=True)) for i in inputs]
links = '<ul>{0}</ul>'.format("\n".join(links))
self.sections.append({
'name': 'Inputs',
'anchor': 'input',
'content':
"<p>Link to the original data analysed.</p>\n"
"{0}".format(links)
})
def pipeline_outputs(self):
""" Links to important outputs generated by the pipeline
"""
# copy outputs in the output directory
output_dir = "outputs"
outputs = [self.copy_file(i, output_dir) for i in self.json['outputs']]
# create links list
html_list = '<li>{0}</li>'
links = [html_list.format(self.create_link(os.path.basename(i), i,
newtab=False, download=True)) for i in outputs]
links = '<ul>{0}</ul>'.format("\n".join(links))
self.sections.append({
'name': 'Outputs',
'anchor': 'outputs',
'content':
"<p>Link to the most important output files generated by the "
"pipeline.</p>\n{0}".format(links)
})
def pipeline_html(self):
""" Links to HTML pages created by the rules.
"""
output_dir = "html"
html = [self.copy_file(i, output_dir) for i in self.json['html']]
html_list = '<li>{0}</li>'
links = [html_list.format(self.create_link(os.path.basename(i), i))
for i in html]
links = '<ul>{0}</ul>'.format("\n".join(links))
self.sections.append({
'name': "External HTML",
'anchor': 'ext_html',
'content':
"<p>Link to HTML pages created by the pipeline.</p>\n{0}"
"\n".format(links)
})
def workflow(self):
""" Create the interactive DAG to navigate through pages.
"""
snakefile = self.copy_file(self.json['snakefile'], './inputs')
configfile = self.copy_file(self.json['config'], './inputs')
# move the SVG file in the images directory
img = self.copy_file(self.json['rulegraph'], './images')
dag_svg = self.include_svg_image(img, alt='rulegraph')
with open(self.json['snakefile'], 'r') as fp:
code = self.add_code_section(fp.read(), 'python')
sf = self.create_hide_section('Sf', "Show/hide Snakemake file", code,
hide=True)
sf = "\n".join(sf)
with open(self.json['config'], 'r') as fp:
code = self.add_code_section(fp.read(), 'yaml')
c = self.create_hide_section('C', "Show/hide config file", code,
hide=True)
c = "\n".join(c)
self.sections.append({
'name': 'Workflow',
'anchor': 'workflow',
'content':
"<p>The following network shows the workflow of the pipeline. "
"Blue boxes are clickable and redirect to dedicated reports."
"</p>\n{0}\n"
"<p>The analysis was performed with the following "
'<a href="{3}">Snakemake</a> and <a href="{4}">configfile</a>:'
"</p>\n"
"<ul>\n"
" <li>{1}</li>\n"
" <li>{2}</li>\n"
"</ul>".format(dag_svg, sf, c, snakefile, configfile)
})
class SummaryModule2(SummaryBase):
""" Write summary HTML report of an analysis. It contains all information
about the pipeline used, input/output files and version of software.
"""
def __init__(self, data, intro="", output_filename="summary.html", title="",
workflow=True):
"""
"""
super(SummaryModule2, self).__init__(required_dir=("js", "css"))
self.json = data
self.name = data['name']
self.title = "Sequana Report Summary ({})".format(self.name)
self.intro = intro
try:
config.pipeline_version = data['pipeline_version']
config.pipeline_name = data['name']
except:
pass
self.create_report_content(workflow=workflow)
self.create_html(output_filename)
def create_report_content(self, workflow=True):
""" Create the report content.
"""
self.sections = list()
for section in config.summary_sections:
self.sections.append(section)
if workflow:
self.workflow()
self.running_stats()
self.dependencies()
def workflow(self):
img = self.json['rulegraph']
dag_svg = self.include_svg_image(img, alt="workflow")
snakefile = ".sequana/{}.rules".format(self.name)
try:
with open(snakefile, 'r') as fp:
code = self.add_code_section(fp.read(), 'python')
sf = self.create_hide_section('Sf', "Show/hide Snakemake file", code,
hide=True)
sf = "\n".join(sf)
except:
sf = "no snakefile found in .sequana/"
configfile = ".sequana/config.yaml"
try:
with open(configfile, 'r') as fp:
code = self.add_code_section(fp.read(), 'yaml')
c = self.create_hide_section('C', "Show/hide config file", code,
hide=True)
c = "\n".join(c)
except:
c = "no config found in .sequana/"
self.sections.append({
'name': 'Workflow',
'anchor': 'workflow',
'content':
"<p>The following network shows the workflow of the pipeline. "
"Blue boxes are clickable and redirect to dedicated reports."
"</p>\n{0}\n"
"<p>The analysis was performed with the following "
'<a href="{3}">Snakemake</a> and <a href="{4}">configfile</a>:'
"</p>\n"
"<ul>\n"
" <li>{1}</li>\n"
" <li>{2}</li>\n"
"</ul>".format(dag_svg, sf, c, snakefile, configfile)
})
def dependencies(self):
""" Table with all python dependencies and a text file with tools
needed and their versions.
"""
html_table = self.get_table_dependencies()
pypi = self.create_link('Pypi', 'http://pypi.python.org')
req = self.create_link('requirements', ".sequana/env.yml")
content = ("<p>Dependencies downloaded from bioconda "
"<b>{2}</b></p>"
"<p>Python dependencies (<b>{0}</b>){1}</p>".format(
pypi, html_table, req))
l, c = self.create_hide_section('Dep', 'collapse/expand', content,
hide=True)
self.sections.append({
'name': "Dependencies {0}".format(
self.add_float_right('<small>{0}</small>'.format(l))
),
'anchor': 'dependencies',
'content': c
})
|
bsd-3-clause
|
eickenberg/scikit-learn
|
examples/covariance/plot_outlier_detection.py
|
14
|
3892
|
"""
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the data are
  Gaussian distributed and which performs better than the One-Class SVM in
  that case;
- using the One-Class SVM and its ability to capture the shape of the data
  set, hence performing better when the data is strongly non-Gaussian, i.e.
  with two well-separated clusters.
The ground truth about inliers and outliers is given by the points' colors,
while the orange-filled area indicates which points are reported as outliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
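# A minimal sketch of the thresholding idea described in the docstring: when
# the contamination fraction is assumed known, the lowest-scoring fraction of
# decision_function values is treated as outliers. Purely illustrative helper;
# the example below applies the same logic inline.
def predict_inliers(scores, contamination):
    # scores: 1d array of decision_function values (larger means more normal)
    # contamination: assumed fraction of outliers in the data set
    threshold = stats.scoreatpercentile(scores, 100 * contamination)
    # boolean mask: True for predicted inliers, False for predicted outliers
    return scores > threshold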
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
|
bsd-3-clause
|
PyPSA/PyPSA
|
pypsa/opf.py
|
1
|
73288
|
## Copyright 2015-2018 Tom Brown (FIAS), Jonas Hoersch (FIAS), David
## Schlachtberger (FIAS)
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Optimal Power Flow functions.
"""
__author__ = "Tom Brown (FIAS), Jonas Hoersch (FIAS), David Schlachtberger (FIAS)"
__copyright__ = "Copyright 2015-2017 Tom Brown (FIAS), Jonas Hoersch (FIAS), David Schlachtberger (FIAS), GNU GPL 3"
import numpy as np
import pandas as pd
from scipy.sparse.linalg import spsolve
from pyomo.environ import (ConcreteModel, Var, NonNegativeReals, Constraint,
Reals, Suffix, Binary, SolverFactory)
try:
from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver
except ImportError:
# Only used in conjunction with isinstance, so we mock it to be backwards compatible
class PersistentSolver(): pass
import logging
logger = logging.getLogger(__name__)
from .pf import (calculate_dependent_values, find_slack_bus,
find_bus_controls, calculate_B_H, calculate_PTDF, find_tree,
find_cycles, _as_snapshots)
from .opt import (l_constraint, l_objective, LExpression, LConstraint,
patch_optsolver_record_memusage_before_solving,
empty_network, free_pyomo_initializers)
from .descriptors import (get_switchable_as_dense, get_switchable_as_iter,
allocate_series_dataframes, zsum)
pd.Series.zsum = zsum
def network_opf(network,snapshots=None):
"""Optimal power flow for snapshots."""
raise NotImplementedError("Non-linear optimal power flow not supported yet")
def define_generator_variables_constraints(network,snapshots):
extendable_gens_i = network.generators.index[network.generators.p_nom_extendable]
fixed_gens_i = network.generators.index[~network.generators.p_nom_extendable & ~network.generators.committable]
fixed_committable_gens_i = network.generators.index[~network.generators.p_nom_extendable & network.generators.committable]
if (network.generators.p_nom_extendable & network.generators.committable).any():
logger.warning("The following generators have both investment optimisation and unit commitment:\n{}\nCurrently PyPSA cannot do both these functions, so PyPSA is choosing investment optimisation for these generators.".format(network.generators.index[network.generators.p_nom_extendable & network.generators.committable]))
bad_uc_gens = network.generators.index[network.generators.committable & (network.generators.min_up_time > 0) & (network.generators.min_down_time > 0) & (network.generators.up_time_before > 0) & (network.generators.down_time_before > 0)]
if not bad_uc_gens.empty:
logger.warning("The following committable generators were both up and down before the simulation: {}. This will cause an infeasibility.".format(bad_uc_gens))
start_i = network.snapshots.get_loc(snapshots[0])
p_min_pu = get_switchable_as_dense(network, 'Generator', 'p_min_pu', snapshots)
p_max_pu = get_switchable_as_dense(network, 'Generator', 'p_max_pu', snapshots)
## Define generator dispatch variables ##
gen_p_bounds = {(gen,sn) : (None,None)
for gen in extendable_gens_i.union(fixed_committable_gens_i)
for sn in snapshots}
if len(fixed_gens_i):
var_lower = p_min_pu.loc[:,fixed_gens_i].multiply(network.generators.loc[fixed_gens_i, 'p_nom'])
var_upper = p_max_pu.loc[:,fixed_gens_i].multiply(network.generators.loc[fixed_gens_i, 'p_nom'])
gen_p_bounds.update({(gen,sn) : (var_lower[gen][sn],var_upper[gen][sn])
for gen in fixed_gens_i
for sn in snapshots})
def gen_p_bounds_f(model,gen_name,snapshot):
return gen_p_bounds[gen_name,snapshot]
network.model.generator_p = Var(list(network.generators.index), snapshots,
domain=Reals, bounds=gen_p_bounds_f)
free_pyomo_initializers(network.model.generator_p)
## Define generator capacity variables if generator is extendable ##
def gen_p_nom_bounds(model, gen_name):
return (network.generators.at[gen_name,"p_nom_min"],
network.generators.at[gen_name,"p_nom_max"])
network.model.generator_p_nom = Var(list(extendable_gens_i),
domain=NonNegativeReals, bounds=gen_p_nom_bounds)
free_pyomo_initializers(network.model.generator_p_nom)
## Define generator dispatch constraints for extendable generators ##
gen_p_lower = {(gen,sn) :
[[(1,network.model.generator_p[gen,sn]),
(-p_min_pu.at[sn, gen],
network.model.generator_p_nom[gen])],">=",0.]
for gen in extendable_gens_i for sn in snapshots}
l_constraint(network.model, "generator_p_lower", gen_p_lower,
list(extendable_gens_i), snapshots)
gen_p_upper = {(gen,sn) :
[[(1,network.model.generator_p[gen,sn]),
(-p_max_pu.at[sn, gen],
network.model.generator_p_nom[gen])],"<=",0.]
for gen in extendable_gens_i for sn in snapshots}
l_constraint(network.model, "generator_p_upper", gen_p_upper,
list(extendable_gens_i), snapshots)
## Define committable generator statuses ##
network.model.generator_status = Var(list(fixed_committable_gens_i), snapshots,
within=Binary)
var_lower = p_min_pu.loc[:,fixed_committable_gens_i].multiply(network.generators.loc[fixed_committable_gens_i, 'p_nom'])
var_upper = p_max_pu.loc[:,fixed_committable_gens_i].multiply(network.generators.loc[fixed_committable_gens_i, 'p_nom'])
committable_gen_p_lower = {(gen,sn) : LConstraint(LExpression([(var_lower[gen][sn],network.model.generator_status[gen,sn]),(-1.,network.model.generator_p[gen,sn])]),"<=") for gen in fixed_committable_gens_i for sn in snapshots}
l_constraint(network.model, "committable_gen_p_lower", committable_gen_p_lower,
list(fixed_committable_gens_i), snapshots)
committable_gen_p_upper = {(gen,sn) : LConstraint(LExpression([(var_upper[gen][sn],network.model.generator_status[gen,sn]),(-1.,network.model.generator_p[gen,sn])]),">=") for gen in fixed_committable_gens_i for sn in snapshots}
l_constraint(network.model, "committable_gen_p_upper", committable_gen_p_upper,
list(fixed_committable_gens_i), snapshots)
## Deal with minimum up time ##
up_time_gens = fixed_committable_gens_i[network.generators.loc[fixed_committable_gens_i,"min_up_time"] > 0]
must_stay_up_too_long = False
for gen_i, gen in enumerate(up_time_gens):
min_up_time = network.generators.at[gen, 'min_up_time']
#find out how long the generator has been up before snapshots
up_time_before = 0
for i in range(1,min(min_up_time,start_i)+1):
if network.generators_t.status.at[network.snapshots[start_i-i],gen] == 0:
break
else:
up_time_before += 1
if up_time_before == start_i:
up_time_before = min(min_up_time,start_i+network.generators.at[gen,"up_time_before"])
if up_time_before == 0:
initial_status = 0
must_stay_up = 0
else:
initial_status = 1
must_stay_up = min_up_time - up_time_before
if must_stay_up > len(snapshots):
must_stay_up_too_long = True
must_stay_up = len(snapshots)
def force_up(model,i):
return model.generator_status[gen,snapshots[i]] == 1
network.model.add_component("gen_up_time_force_{}".format(gen_i),Constraint(range(must_stay_up),rule=force_up))
blocks = range(must_stay_up,len(snapshots)-1)
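        # Rolling-window formulation of the minimum up time: if the unit starts
        # up in snapshot i (status[i] - status[i-1] == 1) the right-hand side
        # becomes `period`, forcing every status in the window [i, i+period) to
        # one; otherwise the constraint is slack.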
def gen_rule(model,i):
period = min(min_up_time,len(snapshots)-i)
lhs = sum(network.model.generator_status[gen,snapshots[j]] for j in range(i,i+period))
if i == 0:
rhs = period*network.model.generator_status[gen,snapshots[i]] - period*initial_status
else:
rhs = period*network.model.generator_status[gen,snapshots[i]] - period*network.model.generator_status[gen,snapshots[i-1]]
return lhs >= rhs
network.model.add_component("gen_up_time_{}".format(gen_i),Constraint(blocks,rule=gen_rule))
if must_stay_up_too_long:
        logger.warning('At least one generator was set to a min_up_time longer '
                       'than possible. Setting it to the maximum possible value.')
## Deal with minimum down time ##
down_time_gens = fixed_committable_gens_i[network.generators.loc[fixed_committable_gens_i,"min_down_time"] > 0]
for gen_i, gen in enumerate(down_time_gens):
min_down_time = network.generators.at[gen,"min_down_time"]
#find out how long the generator has been down before snapshots
down_time_before = 0
for i in range(1,min(min_down_time,start_i)+1):
if network.generators_t.status.at[network.snapshots[start_i-i],gen] == 1:
break
else:
down_time_before += 1
if down_time_before == start_i:
down_time_before = min(min_down_time,start_i+network.generators.at[gen,"down_time_before"])
if down_time_before == 0:
initial_status = 1
must_stay_down = 0
else:
initial_status = 0
must_stay_down = min_down_time - down_time_before
def force_down(model,i):
return model.generator_status[gen,snapshots[i]] == 0
network.model.add_component("gen_down_time_force_{}".format(gen_i),Constraint(range(must_stay_down),rule=force_down))
blocks = range(must_stay_down,len(snapshots)-1)
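        # Mirror of the minimum up time: a shut-down in snapshot i
        # (status[i-1] - status[i] == 1) forces every status in the window
        # [i, i+period) to zero; otherwise the constraint is slack.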
def gen_rule(model,i):
period = min(min_down_time,len(snapshots)-i)
lhs = period - sum(network.model.generator_status[gen,snapshots[j]] for j in range(i,i+period))
if i == 0:
rhs = -period*network.model.generator_status[gen,snapshots[i]] + period*initial_status
else:
rhs = -period*network.model.generator_status[gen,snapshots[i]] + period*network.model.generator_status[gen,snapshots[i-1]]
return lhs >= rhs
network.model.add_component("gen_down_time_{}".format(gen_i),Constraint(blocks,rule=gen_rule))
## Deal with start up costs ##
suc_gens = fixed_committable_gens_i[network.generators.loc[fixed_committable_gens_i,"start_up_cost"] > 0]
network.model.generator_start_up_cost = Var(list(suc_gens),snapshots,
domain=NonNegativeReals)
sucs = {}
for gen in suc_gens:
suc = network.generators.at[gen,"start_up_cost"]
if start_i == 0:
if network.generators.at[gen,"up_time_before"] > 0:
initial_status = 1
else:
initial_status = 0
else:
initial_status = network.generators_t.status.at[network.snapshots[start_i-1],gen]
for i,sn in enumerate(snapshots):
if i == 0:
rhs = LExpression([(suc, network.model.generator_status[gen,sn])],-suc*initial_status)
else:
rhs = LExpression([(suc, network.model.generator_status[gen,sn]),(-suc,network.model.generator_status[gen,snapshots[i-1]])])
lhs = LExpression([(1,network.model.generator_start_up_cost[gen,sn])])
sucs[gen,sn] = LConstraint(lhs,">=",rhs)
l_constraint(network.model, "generator_start_up", sucs, list(suc_gens), snapshots)
## Deal with shut down costs ##
sdc_gens = fixed_committable_gens_i[network.generators.loc[fixed_committable_gens_i,"shut_down_cost"] > 0]
network.model.generator_shut_down_cost = Var(list(sdc_gens),snapshots,
domain=NonNegativeReals)
sdcs = {}
for gen in sdc_gens:
sdc = network.generators.loc[gen,"shut_down_cost"]
if start_i == 0:
if network.generators.at[gen,"down_time_before"] > 0:
initial_status = 0
else:
initial_status = 1
else:
initial_status = network.generators_t.status.at[network.snapshots[start_i-1],gen]
for i,sn in enumerate(snapshots):
if i == 0:
rhs = LExpression([(-sdc, network.model.generator_status[gen,sn])],sdc*initial_status)
else:
rhs = LExpression([(-sdc, network.model.generator_status[gen,sn]),(sdc,network.model.generator_status[gen,snapshots[i-1]])])
lhs = LExpression([(1,network.model.generator_shut_down_cost[gen,sn])])
sdcs[gen,sn] = LConstraint(lhs,">=",rhs)
l_constraint(network.model, "generator_shut_down", sdcs, list(sdc_gens), snapshots)
    ## Deal with ramp limits (with and without unit commitment) ##
ru_gens = network.generators.index[network.generators.ramp_limit_up.notnull()]
ru = {}
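    # The ramp-up limit p[t] - p[t-1] <= ... is built for three cases:
    #  - extendable generators: the limit scales with the p_nom variable,
    #  - fixed, non-committable generators: a constant ramp_limit_up * p_nom,
    #  - committable generators: ramp_limit_start_up applies in the snapshot in
    #    which the unit starts up, ramp_limit_up otherwise.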
for gen in ru_gens:
for sn, sn_prev in zip(snapshots[1:], snapshots[:-1]):
if network.generators.at[gen, "p_nom_extendable"]:
lhs = LExpression([(1, network.model.generator_p[gen,sn]),
(-1, network.model.generator_p[gen,sn_prev]),
(-network.generators.at[gen, "ramp_limit_up"],
network.model.generator_p_nom[gen])])
elif not network.generators.at[gen, "committable"]:
lhs = LExpression([(1, network.model.generator_p[gen,sn]),
(-1, network.model.generator_p[gen,sn_prev])],
-network.generators.at[gen, "ramp_limit_up"]*network.generators.at[gen, "p_nom"])
else:
lhs = LExpression([(1, network.model.generator_p[gen,sn]),
(-1, network.model.generator_p[gen,sn_prev]),
((network.generators.at[gen, "ramp_limit_start_up"] - network.generators.at[gen, "ramp_limit_up"])*network.generators.at[gen, "p_nom"],
network.model.generator_status[gen,sn_prev]),
(-network.generators.at[gen, "ramp_limit_start_up"]*network.generators.at[gen, "p_nom"],
network.model.generator_status[gen,sn])])
ru[gen,sn] = LConstraint(lhs,"<=")
l_constraint(network.model, "ramp_up", ru, list(ru_gens), snapshots[1:])
#case of ramping if not at start of network.snapshots
if start_i > 0:
ru_start = {}
sn = snapshots[0]
for gen in ru_gens:
p_prev = network.generators_t.p.at[network.snapshots[start_i-1],gen]
if network.generators.at[gen, "p_nom_extendable"]:
lhs = LExpression([(1, network.model.generator_p[gen,sn]),
(-network.generators.at[gen, "ramp_limit_up"],
network.model.generator_p_nom[gen])],
-p_prev)
elif not network.generators.at[gen, "committable"]:
lhs = LExpression([(1, network.model.generator_p[gen,sn])],
-network.generators.at[gen, "ramp_limit_up"]*network.generators.at[gen, "p_nom"]-p_prev)
else:
status_prev = network.generators_t.status.at[network.snapshots[start_i-1],gen]
lhs = LExpression([(1, network.model.generator_p[gen,sn]),
(-network.generators.at[gen, "ramp_limit_start_up"]*network.generators.at[gen, "p_nom"],
network.model.generator_status[gen,sn])],
-p_prev + status_prev*(network.generators.at[gen, "ramp_limit_start_up"] - network.generators.at[gen, "ramp_limit_up"])*network.generators.at[gen, "p_nom"])
ru_start[gen] = LConstraint(lhs,"<=")
l_constraint(network.model, "ramp_up_start", ru_start, list(ru_gens))
rd_gens = network.generators.index[network.generators.ramp_limit_down.notnull()]
rd = {}
for gen in rd_gens:
for sn, sn_prev in zip(snapshots[1:], snapshots[:-1]):
if network.generators.at[gen, "p_nom_extendable"]:
lhs = LExpression([(1, network.model.generator_p[gen,sn]),
(-1, network.model.generator_p[gen,sn_prev]),
(network.generators.at[gen, "ramp_limit_down"],
network.model.generator_p_nom[gen])])
elif not network.generators.at[gen, "committable"]:
lhs = LExpression([(1, network.model.generator_p[gen,sn]),
(-1, network.model.generator_p[gen,sn_prev])],
network.generators.loc[gen, "ramp_limit_down"]*network.generators.at[gen, "p_nom"])
else:
lhs = LExpression([(1, network.model.generator_p[gen,sn]),
(-1, network.model.generator_p[gen,sn_prev]),
((network.generators.at[gen, "ramp_limit_down"] - network.generators.at[gen, "ramp_limit_shut_down"])*network.generators.at[gen, "p_nom"],
network.model.generator_status[gen,sn]),
(network.generators.at[gen, "ramp_limit_shut_down"]*network.generators.at[gen, "p_nom"],
network.model.generator_status[gen,sn_prev])])
rd[gen,sn] = LConstraint(lhs,">=")
l_constraint(network.model, "ramp_down", rd, list(rd_gens), snapshots[1:])
#case of ramping if not at start of network.snapshots
if start_i > 0:
rd_start = {}
sn = snapshots[0]
for gen in rd_gens:
p_prev = network.generators_t.p.at[network.snapshots[start_i-1],gen]
if network.generators.at[gen, "p_nom_extendable"]:
lhs = LExpression([(1, network.model.generator_p[gen,sn]),
(network.generators.at[gen, "ramp_limit_down"],
network.model.generator_p_nom[gen])],
-p_prev)
elif not network.generators.at[gen, "committable"]:
lhs = LExpression([(1, network.model.generator_p[gen,sn])],
network.generators.loc[gen, "ramp_limit_down"]*network.generators.at[gen, "p_nom"]-p_prev)
else:
status_prev = network.generators_t.status.at[network.snapshots[start_i-1],gen]
lhs = LExpression([(1, network.model.generator_p[gen,sn]),
((network.generators.at[gen, "ramp_limit_down"] - network.generators.at[gen, "ramp_limit_shut_down"])*network.generators.at[gen, "p_nom"],
network.model.generator_status[gen,sn])],
-p_prev + status_prev*network.generators.at[gen, "ramp_limit_shut_down"]*network.generators.at[gen, "p_nom"])
rd_start[gen] = LConstraint(lhs,">=")
l_constraint(network.model, "ramp_down_start", rd_start, list(rd_gens))
def define_storage_variables_constraints(network,snapshots):
sus = network.storage_units
ext_sus_i = sus.index[sus.p_nom_extendable]
fix_sus_i = sus.index[~ sus.p_nom_extendable]
model = network.model
## Define storage dispatch variables ##
p_max_pu = get_switchable_as_dense(network, 'StorageUnit', 'p_max_pu', snapshots)
p_min_pu = get_switchable_as_dense(network, 'StorageUnit', 'p_min_pu', snapshots)
bounds = {(su,sn) : (0,None) for su in ext_sus_i for sn in snapshots}
bounds.update({(su,sn) :
(0,sus.at[su,"p_nom"]*p_max_pu.at[sn, su])
for su in fix_sus_i for sn in snapshots})
def su_p_dispatch_bounds(model,su_name,snapshot):
return bounds[su_name,snapshot]
network.model.storage_p_dispatch = Var(list(network.storage_units.index), snapshots,
domain=NonNegativeReals, bounds=su_p_dispatch_bounds)
free_pyomo_initializers(network.model.storage_p_dispatch)
bounds = {(su,sn) : (0,None) for su in ext_sus_i for sn in snapshots}
bounds.update({(su,sn) :
(0,-sus.at[su,"p_nom"]*p_min_pu.at[sn, su])
for su in fix_sus_i
for sn in snapshots})
def su_p_store_bounds(model,su_name,snapshot):
return bounds[su_name,snapshot]
network.model.storage_p_store = Var(list(network.storage_units.index), snapshots,
domain=NonNegativeReals, bounds=su_p_store_bounds)
free_pyomo_initializers(network.model.storage_p_store)
## Define spillage variables only for hours with inflow>0. ##
inflow = get_switchable_as_dense(network, 'StorageUnit', 'inflow', snapshots)
spill_sus_i = sus.index[inflow.max()>0] #skip storage units without any inflow
inflow_gt0_b = inflow>0
spill_bounds = {(su,sn) : (0,inflow.at[sn,su])
for su in spill_sus_i
for sn in snapshots
if inflow_gt0_b.at[sn,su]}
spill_index = spill_bounds.keys()
def su_p_spill_bounds(model,su_name,snapshot):
return spill_bounds[su_name,snapshot]
network.model.storage_p_spill = Var(list(spill_index),
domain=NonNegativeReals, bounds=su_p_spill_bounds)
free_pyomo_initializers(network.model.storage_p_spill)
    ## Define storage capacity variables if storage unit is extendable ##
def su_p_nom_bounds(model, su_name):
return (sus.at[su_name,"p_nom_min"],
sus.at[su_name,"p_nom_max"])
network.model.storage_p_nom = Var(list(ext_sus_i), domain=NonNegativeReals,
bounds=su_p_nom_bounds)
free_pyomo_initializers(network.model.storage_p_nom)
    ## Define dispatch constraints for extendable storage units ##
def su_p_upper(model,su_name,snapshot):
return (model.storage_p_dispatch[su_name,snapshot] <=
model.storage_p_nom[su_name]*p_max_pu.at[snapshot, su_name])
network.model.storage_p_upper = Constraint(list(ext_sus_i),snapshots,rule=su_p_upper)
free_pyomo_initializers(network.model.storage_p_upper)
def su_p_lower(model,su_name,snapshot):
return (model.storage_p_store[su_name,snapshot] <=
-model.storage_p_nom[su_name]*p_min_pu.at[snapshot, su_name])
network.model.storage_p_lower = Constraint(list(ext_sus_i),snapshots,rule=su_p_lower)
free_pyomo_initializers(network.model.storage_p_lower)
## Now define state of charge constraints ##
network.model.state_of_charge = Var(list(network.storage_units.index), snapshots,
domain=NonNegativeReals, bounds=(0,None))
upper = {(su,sn) : [[(1,model.state_of_charge[su,sn]),
(-sus.at[su,"max_hours"],model.storage_p_nom[su])],"<=",0.]
for su in ext_sus_i for sn in snapshots}
upper.update({(su,sn) : [[(1,model.state_of_charge[su,sn])],"<=",
sus.at[su,"max_hours"]*sus.at[su,"p_nom"]]
for su in fix_sus_i for sn in snapshots})
l_constraint(model, "state_of_charge_upper", upper,
list(network.storage_units.index), snapshots)
#this builds the constraint previous_soc + p_store - p_dispatch + inflow - spill == soc
#it is complicated by the fact that sometimes previous_soc and soc are floats, not variables
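    #written out with weightings, losses and efficiencies the constraint reads
    #soc[t] = (1 - standing_loss)^hours * soc[t-1]
    #         + hours * efficiency_store * p_store[t]
    #         - hours * (1 / efficiency_dispatch) * p_dispatch[t]
    #         + hours * inflow[t] - hours * spill[t]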
soc = {}
#store the combinations with a fixed soc
fixed_soc = {}
state_of_charge_set = get_switchable_as_dense(network, 'StorageUnit', 'state_of_charge_set', snapshots)
for su in sus.index:
for i,sn in enumerate(snapshots):
soc[su,sn] = [[],"==",0.]
elapsed_hours = network.snapshot_weightings.stores[sn]
if i == 0 and not sus.at[su,"cyclic_state_of_charge"]:
previous_state_of_charge = sus.at[su,"state_of_charge_initial"]
soc[su,sn][2] -= ((1-sus.at[su,"standing_loss"])**elapsed_hours
* previous_state_of_charge)
else:
previous_state_of_charge = model.state_of_charge[su,snapshots[i-1]]
soc[su,sn][0].append(((1-sus.at[su,"standing_loss"])**elapsed_hours,
previous_state_of_charge))
state_of_charge = state_of_charge_set.at[sn,su]
if pd.isnull(state_of_charge):
state_of_charge = model.state_of_charge[su,sn]
soc[su,sn][0].append((-1,state_of_charge))
else:
soc[su,sn][2] += state_of_charge
#make sure the variable is also set to the fixed state of charge
fixed_soc[su,sn] = [[(1,model.state_of_charge[su,sn])],"==",state_of_charge]
soc[su,sn][0].append((sus.at[su,"efficiency_store"]
* elapsed_hours,model.storage_p_store[su,sn]))
soc[su,sn][0].append((-(1/sus.at[su,"efficiency_dispatch"]) * elapsed_hours,
model.storage_p_dispatch[su,sn]))
soc[su,sn][2] -= inflow.at[sn,su] * elapsed_hours
for su,sn in spill_index:
elapsed_hours = network.snapshot_weightings.stores[sn]
storage_p_spill = model.storage_p_spill[su,sn]
soc[su,sn][0].append((-1.*elapsed_hours,storage_p_spill))
l_constraint(model,"state_of_charge_constraint",
soc,list(network.storage_units.index), snapshots)
l_constraint(model, "state_of_charge_constraint_fixed",
fixed_soc, list(fixed_soc.keys()))
def define_store_variables_constraints(network,snapshots):
stores = network.stores
ext_stores = stores.index[stores.e_nom_extendable]
fix_stores = stores.index[~ stores.e_nom_extendable]
e_max_pu = get_switchable_as_dense(network, 'Store', 'e_max_pu', snapshots)
e_min_pu = get_switchable_as_dense(network, 'Store', 'e_min_pu', snapshots)
model = network.model
## Define store dispatch variables ##
network.model.store_p = Var(list(stores.index), snapshots, domain=Reals)
## Define store energy variables ##
bounds = {(store,sn) : (None,None) for store in ext_stores for sn in snapshots}
bounds.update({(store,sn) :
(stores.at[store,"e_nom"]*e_min_pu.at[sn,store],stores.at[store,"e_nom"]*e_max_pu.at[sn,store])
for store in fix_stores for sn in snapshots})
def store_e_bounds(model,store,snapshot):
return bounds[store,snapshot]
network.model.store_e = Var(list(stores.index), snapshots, domain=Reals,
bounds=store_e_bounds)
free_pyomo_initializers(network.model.store_e)
## Define energy capacity variables if store is extendable ##
def store_e_nom_bounds(model, store):
return (stores.at[store,"e_nom_min"],
stores.at[store,"e_nom_max"])
network.model.store_e_nom = Var(list(ext_stores), domain=Reals,
bounds=store_e_nom_bounds)
free_pyomo_initializers(network.model.store_e_nom)
    ## Define energy capacity constraints for extendable stores ##
def store_e_upper(model,store,snapshot):
return (model.store_e[store,snapshot] <=
model.store_e_nom[store]*e_max_pu.at[snapshot,store])
network.model.store_e_upper = Constraint(list(ext_stores), snapshots, rule=store_e_upper)
free_pyomo_initializers(network.model.store_e_upper)
def store_e_lower(model,store,snapshot):
return (model.store_e[store,snapshot] >=
model.store_e_nom[store]*e_min_pu.at[snapshot,store])
network.model.store_e_lower = Constraint(list(ext_stores), snapshots, rule=store_e_lower)
free_pyomo_initializers(network.model.store_e_lower)
## Builds the constraint previous_e - p == e ##
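    # i.e. e[t] = (1 - standing_loss)^hours * e[t-1] - hours * store_p[t],
    # with e[t-1] replaced by e_initial in the first snapshot unless the
    # store is cyclic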
e = {}
for store in stores.index:
for i,sn in enumerate(snapshots):
e[store,sn] = LConstraint(sense="==")
e[store,sn].lhs.variables.append((-1,model.store_e[store,sn]))
elapsed_hours = network.snapshot_weightings.stores[sn]
if i == 0 and not stores.at[store,"e_cyclic"]:
previous_e = stores.at[store,"e_initial"]
e[store,sn].lhs.constant += ((1-stores.at[store,"standing_loss"])**elapsed_hours
* previous_e)
else:
previous_e = model.store_e[store,snapshots[i-1]]
e[store,sn].lhs.variables.append(((1-stores.at[store,"standing_loss"])**elapsed_hours,
previous_e))
e[store,sn].lhs.variables.append((-elapsed_hours, model.store_p[store,sn]))
l_constraint(model,"store_constraint", e, list(stores.index), snapshots)
def define_branch_extension_variables(network,snapshots):
passive_branches = network.passive_branches()
extendable_passive_branches = passive_branches[passive_branches.s_nom_extendable]
bounds = {b : (extendable_passive_branches.at[b,"s_nom_min"],
extendable_passive_branches.at[b,"s_nom_max"])
for b in extendable_passive_branches.index}
def branch_s_nom_bounds(model, branch_type, branch_name):
return bounds[branch_type,branch_name]
network.model.passive_branch_s_nom = Var(list(extendable_passive_branches.index),
domain=NonNegativeReals, bounds=branch_s_nom_bounds)
free_pyomo_initializers(network.model.passive_branch_s_nom)
extendable_links = network.links[network.links.p_nom_extendable]
bounds = {b : (extendable_links.at[b,"p_nom_min"],
extendable_links.at[b,"p_nom_max"])
for b in extendable_links.index}
def branch_p_nom_bounds(model, branch_name):
return bounds[branch_name]
network.model.link_p_nom = Var(list(extendable_links.index),
domain=NonNegativeReals, bounds=branch_p_nom_bounds)
free_pyomo_initializers(network.model.link_p_nom)
def define_link_flows(network,snapshots):
extendable_links_i = network.links.index[network.links.p_nom_extendable]
fixed_links_i = network.links.index[~ network.links.p_nom_extendable]
p_max_pu = get_switchable_as_dense(network, 'Link', 'p_max_pu', snapshots)
p_min_pu = get_switchable_as_dense(network, 'Link', 'p_min_pu', snapshots)
fixed_lower = p_min_pu.loc[:,fixed_links_i].multiply(network.links.loc[fixed_links_i, 'p_nom'])
fixed_upper = p_max_pu.loc[:,fixed_links_i].multiply(network.links.loc[fixed_links_i, 'p_nom'])
network.model.link_p = Var(list(network.links.index), snapshots)
p_upper = {(cb, sn) : LConstraint(LExpression([(1, network.model.link_p[cb, sn])],
-fixed_upper.at[sn, cb]),"<=")
for cb in fixed_links_i for sn in snapshots}
p_upper.update({(cb,sn) : LConstraint(LExpression([(1, network.model.link_p[cb, sn]),
(-p_max_pu.at[sn, cb], network.model.link_p_nom[cb])]),
"<=")
for cb in extendable_links_i for sn in snapshots})
l_constraint(network.model, "link_p_upper", p_upper,
list(network.links.index), snapshots)
p_lower = {(cb, sn) : LConstraint(LExpression([(1, network.model.link_p[cb, sn])],
-fixed_lower.at[sn, cb]),">=")
for cb in fixed_links_i for sn in snapshots}
p_lower.update({(cb,sn) : LConstraint(LExpression([(1, network.model.link_p[cb, sn]),
(-p_min_pu.at[sn, cb], network.model.link_p_nom[cb])]),
">=")
for cb in extendable_links_i for sn in snapshots})
l_constraint(network.model, "link_p_lower", p_lower,
list(network.links.index), snapshots)
def define_passive_branch_flows(network,snapshots,formulation="angles",ptdf_tolerance=0.):
if formulation == "angles":
define_passive_branch_flows_with_angles(network,snapshots)
elif formulation == "ptdf":
define_passive_branch_flows_with_PTDF(network,snapshots,ptdf_tolerance)
elif formulation == "cycles":
define_passive_branch_flows_with_cycles(network,snapshots)
elif formulation == "kirchhoff":
define_passive_branch_flows_with_kirchhoff(network,snapshots)
def define_passive_branch_flows_with_angles(network,snapshots):
network.model.voltage_angles = Var(list(network.buses.index), snapshots)
slack = {(sub,sn) :
[[(1,network.model.voltage_angles[network.sub_networks.slack_bus[sub],sn])], "==", 0.]
for sub in network.sub_networks.index for sn in snapshots}
l_constraint(network.model,"slack_angle",slack,list(network.sub_networks.index),snapshots)
passive_branches = network.passive_branches()
network.model.passive_branch_p = Var(list(passive_branches.index), snapshots)
flows = {}
for branch in passive_branches.index:
bus0 = passive_branches.at[branch,"bus0"]
bus1 = passive_branches.at[branch,"bus1"]
bt = branch[0]
bn = branch[1]
sub = passive_branches.at[branch,"sub_network"]
attribute = "r_pu_eff" if network.sub_networks.at[sub,"carrier"] == "DC" else "x_pu_eff"
y = 1/ passive_branches.at[ branch, attribute]
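        # linearised flow: passive_branch_p = y * (theta_bus0 - theta_bus1 -
        # phase_shift), with y = 1/x (1/r for DC sub-networks); the phase shift
        # only applies to transformers and is converted from degrees to radians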
for sn in snapshots:
lhs = LExpression([(y,network.model.voltage_angles[bus0,sn]),
(-y,network.model.voltage_angles[bus1,sn]),
(-1,network.model.passive_branch_p[bt,bn,sn])],
-y*(passive_branches.at[branch,"phase_shift"]*np.pi/180. if bt == "Transformer" else 0.))
flows[bt,bn,sn] = LConstraint(lhs,"==",LExpression())
l_constraint(network.model, "passive_branch_p_def", flows,
list(passive_branches.index), snapshots)
def define_passive_branch_flows_with_PTDF(network,snapshots,ptdf_tolerance=0.):
passive_branches = network.passive_branches()
network.model.passive_branch_p = Var(list(passive_branches.index), snapshots)
flows = {}
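    # With the PTDF formulation each branch flow is expressed directly as a
    # linear combination of the nodal power balances, so no voltage angle
    # variables are needed.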
for sub_network in network.sub_networks.obj:
find_bus_controls(sub_network)
branches_i = sub_network.branches_i()
if len(branches_i) > 0:
calculate_PTDF(sub_network)
#kill small PTDF values
sub_network.PTDF[abs(sub_network.PTDF) < ptdf_tolerance] = 0
for i,branch in enumerate(branches_i):
bt = branch[0]
bn = branch[1]
for sn in snapshots:
lhs = sum(sub_network.PTDF[i,j]*network._p_balance[bus,sn]
for j,bus in enumerate(sub_network.buses_o)
if sub_network.PTDF[i,j] != 0)
rhs = LExpression([(1,network.model.passive_branch_p[bt,bn,sn])])
flows[bt,bn,sn] = LConstraint(lhs,"==",rhs)
l_constraint(network.model, "passive_branch_p_def", flows,
list(passive_branches.index), snapshots)
def define_sub_network_cycle_constraints( subnetwork, snapshots, passive_branch_p, attribute):
""" Constructs cycle_constraints for a particular subnetwork
"""
sub_network_cycle_constraints = {}
sub_network_cycle_index = []
matrix = subnetwork.C.tocsc()
branches = subnetwork.branches()
for col_j in range( matrix.shape[1] ):
cycle_is = matrix.getcol(col_j).nonzero()[0]
if len(cycle_is) == 0: continue
sub_network_cycle_index.append((subnetwork.name, col_j))
branch_idx_attributes = []
for cycle_i in cycle_is:
branch_idx = branches.index[cycle_i]
attribute_value = 1e5 * branches.at[ branch_idx, attribute] * subnetwork.C[ cycle_i, col_j]
branch_idx_attributes.append( (branch_idx, attribute_value))
for snapshot in snapshots:
expression_list = [ (attribute_value,
passive_branch_p[branch_idx[0], branch_idx[1], snapshot]) for (branch_idx, attribute_value) in branch_idx_attributes]
lhs = LExpression(expression_list)
sub_network_cycle_constraints[subnetwork.name,col_j,snapshot] = LConstraint(lhs,"==",LExpression())
return( sub_network_cycle_index, sub_network_cycle_constraints)
def define_passive_branch_flows_with_cycles(network,snapshots):
for sub_network in network.sub_networks.obj:
find_tree(sub_network)
find_cycles(sub_network)
#following is necessary to calculate angles post-facto
find_bus_controls(sub_network)
if len(sub_network.branches_i()) > 0:
calculate_B_H(sub_network)
passive_branches = network.passive_branches()
network.model.passive_branch_p = Var(list(passive_branches.index), snapshots)
cycle_index = []
cycle_constraints = {}
for subnetwork in network.sub_networks.obj:
branches = subnetwork.branches()
attribute = "r_pu_eff" if network.sub_networks.at[subnetwork.name,"carrier"] == "DC" else "x_pu_eff"
sub_network_cycle_index, sub_network_cycle_constraints = define_sub_network_cycle_constraints( subnetwork,
snapshots,
network.model.passive_branch_p, attribute)
cycle_index.extend( sub_network_cycle_index)
cycle_constraints.update( sub_network_cycle_constraints)
l_constraint(network.model, "cycle_constraints", cycle_constraints,
cycle_index, snapshots)
network.model.cycles = Var(cycle_index, snapshots, domain=Reals, bounds=(None,None))
flows = {}
for subnetwork in network.sub_networks.obj:
branches = subnetwork.branches()
buses = subnetwork.buses()
for i,branch in enumerate(branches.index):
bt = branch[0]
bn = branch[1]
cycle_is = subnetwork.C[i,:].nonzero()[1]
tree_is = subnetwork.T[i,:].nonzero()[1]
if len(cycle_is) + len(tree_is) == 0: logger.error("The cycle formulation does not support infinite impedances, yet.")
for snapshot in snapshots:
expr = LExpression([(subnetwork.C[i,j], network.model.cycles[subnetwork.name,j,snapshot])
for j in cycle_is])
lhs = expr + sum(subnetwork.T[i,j]*network._p_balance[buses.index[j],snapshot]
for j in tree_is)
rhs = LExpression([(1,network.model.passive_branch_p[bt,bn,snapshot])])
flows[bt,bn,snapshot] = LConstraint(lhs,"==",rhs)
l_constraint(network.model, "passive_branch_p_def", flows,
list(passive_branches.index), snapshots)
def define_passive_branch_flows_with_kirchhoff(network,snapshots,skip_vars=False):
""" define passive branch flows with the kirchoff method """
for sub_network in network.sub_networks.obj:
find_tree(sub_network)
find_cycles(sub_network)
#following is necessary to calculate angles post-facto
find_bus_controls(sub_network)
if len(sub_network.branches_i()) > 0:
calculate_B_H(sub_network)
passive_branches = network.passive_branches()
if not skip_vars:
network.model.passive_branch_p = Var(list(passive_branches.index), snapshots)
cycle_index = []
cycle_constraints = {}
for subnetwork in network.sub_networks.obj:
attribute = "r_pu_eff" if network.sub_networks.at[subnetwork.name,"carrier"] == "DC" else "x_pu_eff"
sub_network_cycle_index, sub_network_cycle_constraints = define_sub_network_cycle_constraints( subnetwork,
snapshots,
network.model.passive_branch_p, attribute)
cycle_index.extend( sub_network_cycle_index)
cycle_constraints.update( sub_network_cycle_constraints)
l_constraint(network.model, "cycle_constraints", cycle_constraints,
cycle_index, snapshots)
def define_passive_branch_constraints(network,snapshots):
passive_branches = network.passive_branches()
extendable_branches = passive_branches[passive_branches.s_nom_extendable]
fixed_branches = passive_branches[~ passive_branches.s_nom_extendable]
s_max_pu = pd.concat({c : get_switchable_as_dense(network, c, 's_max_pu', snapshots)
for c in network.passive_branch_components}, axis=1, sort=False)
flow_upper = {(b[0],b[1],sn) : [[(1,network.model.passive_branch_p[b[0],b[1],sn])],
"<=", s_max_pu.at[sn,b]*fixed_branches.at[b,"s_nom"]]
for b in fixed_branches.index
for sn in snapshots}
flow_upper.update({(b[0],b[1],sn) : [[(1,network.model.passive_branch_p[b[0],b[1],sn]),
(-s_max_pu.at[sn,b],network.model.passive_branch_s_nom[b[0],b[1]])],"<=",0]
for b in extendable_branches.index
for sn in snapshots})
l_constraint(network.model, "flow_upper", flow_upper,
list(passive_branches.index), snapshots)
flow_lower = {(b[0],b[1],sn) : [[(1,network.model.passive_branch_p[b[0],b[1],sn])],
">=", -s_max_pu.at[sn,b]*fixed_branches.at[b,"s_nom"]]
for b in fixed_branches.index
for sn in snapshots}
flow_lower.update({(b[0],b[1],sn): [[(1,network.model.passive_branch_p[b[0],b[1],sn]),
(s_max_pu.at[sn,b],network.model.passive_branch_s_nom[b[0],b[1]])],">=",0]
for b in extendable_branches.index
for sn in snapshots})
l_constraint(network.model, "flow_lower", flow_lower,
list(passive_branches.index), snapshots)
def define_nodal_balances(network,snapshots):
"""Construct the nodal balance for all elements except the passive
branches.
Store the nodal balance expression in network._p_balance.
"""
#dictionary for constraints
network._p_balance = {(bus,sn) : LExpression()
for bus in network.buses.index
for sn in snapshots}
efficiency = get_switchable_as_dense(network, 'Link', 'efficiency', snapshots)
for cb in network.links.index:
bus0 = network.links.at[cb,"bus0"]
bus1 = network.links.at[cb,"bus1"]
for sn in snapshots:
network._p_balance[bus0,sn].variables.append((-1,network.model.link_p[cb,sn]))
network._p_balance[bus1,sn].variables.append((efficiency.at[sn,cb],network.model.link_p[cb,sn]))
#Add any other buses to which the links are attached
for i in [int(col[3:]) for col in network.links.columns if col[:3] == "bus" and col not in ["bus0","bus1"]]:
efficiency = get_switchable_as_dense(network, 'Link', 'efficiency{}'.format(i), snapshots)
for cb in network.links.index[network.links["bus{}".format(i)] != ""]:
bus = network.links.at[cb, "bus{}".format(i)]
for sn in snapshots:
network._p_balance[bus,sn].variables.append((efficiency.at[sn,cb],network.model.link_p[cb,sn]))
for gen in network.generators.index:
bus = network.generators.at[gen,"bus"]
sign = network.generators.at[gen,"sign"]
for sn in snapshots:
network._p_balance[bus,sn].variables.append((sign,network.model.generator_p[gen,sn]))
load_p_set = get_switchable_as_dense(network, 'Load', 'p_set', snapshots)
for load in network.loads.index:
bus = network.loads.at[load,"bus"]
sign = network.loads.at[load,"sign"]
for sn in snapshots:
network._p_balance[bus,sn].constant += sign*load_p_set.at[sn,load]
for su in network.storage_units.index:
bus = network.storage_units.at[su,"bus"]
sign = network.storage_units.at[su,"sign"]
for sn in snapshots:
network._p_balance[bus,sn].variables.append((sign,network.model.storage_p_dispatch[su,sn]))
network._p_balance[bus,sn].variables.append((-sign,network.model.storage_p_store[su,sn]))
for store in network.stores.index:
bus = network.stores.at[store,"bus"]
sign = network.stores.at[store,"sign"]
for sn in snapshots:
network._p_balance[bus,sn].variables.append((sign,network.model.store_p[store,sn]))
def define_nodal_balance_constraints(network,snapshots):
passive_branches = network.passive_branches()
for branch in passive_branches.index:
bus0 = passive_branches.at[branch,"bus0"]
bus1 = passive_branches.at[branch,"bus1"]
bt = branch[0]
bn = branch[1]
for sn in snapshots:
network._p_balance[bus0,sn].variables.append((-1,network.model.passive_branch_p[bt,bn,sn]))
network._p_balance[bus1,sn].variables.append((1,network.model.passive_branch_p[bt,bn,sn]))
power_balance = {k: LConstraint(v,"==",LExpression()) for k,v in network._p_balance.items()}
l_constraint(network.model, "power_balance", power_balance,
list(network.buses.index), snapshots)
def define_sub_network_balance_constraints(network,snapshots):
sn_balance = {}
for sub_network in network.sub_networks.obj:
for sn in snapshots:
sn_balance[sub_network.name,sn] = LConstraint(LExpression(),"==",LExpression())
for bus in sub_network.buses().index:
sn_balance[sub_network.name,sn].lhs.variables.extend(network._p_balance[bus,sn].variables)
sn_balance[sub_network.name,sn].lhs.constant += network._p_balance[bus,sn].constant
l_constraint(network.model,"sub_network_balance_constraint", sn_balance,
list(network.sub_networks.index), snapshots)
def define_global_constraints(network,snapshots):
global_constraints = {}
for gc in network.global_constraints.index:
if network.global_constraints.loc[gc,"type"] == "primary_energy":
c = LConstraint(sense=network.global_constraints.loc[gc,"sense"])
c.rhs.constant = network.global_constraints.loc[gc,"constant"]
carrier_attribute = network.global_constraints.loc[gc,"carrier_attribute"]
for carrier in network.carriers.index:
attribute = network.carriers.at[carrier,carrier_attribute]
if attribute == 0.:
continue
#for generators, use the prime mover carrier
gens = network.generators.index[network.generators.carrier == carrier]
c.lhs.variables.extend([(attribute
* (1/network.generators.at[gen,"efficiency"])
* network.snapshot_weightings.generators[sn],
network.model.generator_p[gen,sn])
for gen in gens
for sn in snapshots])
#for storage units, use the prime mover carrier
#take difference of energy at end and start of period
sus = network.storage_units.index[(network.storage_units.carrier == carrier) & (~network.storage_units.cyclic_state_of_charge)]
c.lhs.variables.extend([(-attribute, network.model.state_of_charge[su,snapshots[-1]])
for su in sus])
c.lhs.constant += sum(attribute*network.storage_units.at[su,"state_of_charge_initial"]
for su in sus)
#for stores, inherit the carrier from the bus
#take difference of energy at end and start of period
stores = network.stores.index[(network.stores.bus.map(network.buses.carrier) == carrier) & (~network.stores.e_cyclic)]
c.lhs.variables.extend([(-attribute, network.model.store_e[store,snapshots[-1]])
for store in stores])
c.lhs.constant += sum(attribute*network.stores.at[store,"e_initial"]
for store in stores)
global_constraints[gc] = c
l_constraint(network.model, "global_constraints",
global_constraints, list(network.global_constraints.index))
def define_linear_objective(network,snapshots):
model = network.model
extendable_generators = network.generators[network.generators.p_nom_extendable]
ext_sus = network.storage_units[network.storage_units.p_nom_extendable]
ext_stores = network.stores[network.stores.e_nom_extendable]
passive_branches = network.passive_branches()
extendable_passive_branches = passive_branches[passive_branches.s_nom_extendable]
extendable_links = network.links[network.links.p_nom_extendable]
suc_gens_i = network.generators.index[~network.generators.p_nom_extendable & network.generators.committable & (network.generators.start_up_cost > 0)]
sdc_gens_i = network.generators.index[~network.generators.p_nom_extendable & network.generators.committable & (network.generators.shut_down_cost > 0)]
marginal_cost_it = zip(get_switchable_as_iter(network, 'Generator', 'marginal_cost', snapshots),
get_switchable_as_iter(network, 'StorageUnit', 'marginal_cost', snapshots),
get_switchable_as_iter(network, 'Store', 'marginal_cost', snapshots),
get_switchable_as_iter(network, 'Link', 'marginal_cost', snapshots))
objective = LExpression()
for sn, marginal_cost in zip(snapshots, marginal_cost_it):
gen_mc, su_mc, st_mc, link_mc = marginal_cost
weight = network.snapshot_weightings.objective[sn]
for gen in network.generators.index:
coefficient = gen_mc.at[gen] * weight
objective.variables.extend([(coefficient, model.generator_p[gen, sn])])
for su in network.storage_units.index:
coefficient = su_mc.at[su] * weight
objective.variables.extend([(coefficient, model.storage_p_dispatch[su,sn])])
for store in network.stores.index:
coefficient = st_mc.at[store] * weight
objective.variables.extend([(coefficient, model.store_p[store,sn])])
for link in network.links.index:
coefficient = link_mc.at[link] * weight
objective.variables.extend([(coefficient, model.link_p[link,sn])])
#NB: for capital costs we subtract the costs of existing infrastructure p_nom/s_nom
objective.variables.extend([(extendable_generators.at[gen,"capital_cost"], model.generator_p_nom[gen])
for gen in extendable_generators.index])
objective.constant -= (extendable_generators.capital_cost * extendable_generators.p_nom).zsum()
objective.variables.extend([(ext_sus.at[su,"capital_cost"], model.storage_p_nom[su])
for su in ext_sus.index])
objective.constant -= (ext_sus.capital_cost*ext_sus.p_nom).zsum()
objective.variables.extend([(ext_stores.at[store,"capital_cost"], model.store_e_nom[store])
for store in ext_stores.index])
objective.constant -= (ext_stores.capital_cost*ext_stores.e_nom).zsum()
objective.variables.extend([(extendable_passive_branches.at[b,"capital_cost"], model.passive_branch_s_nom[b])
for b in extendable_passive_branches.index])
objective.constant -= (extendable_passive_branches.capital_cost * extendable_passive_branches.s_nom).zsum()
objective.variables.extend([(extendable_links.at[b,"capital_cost"], model.link_p_nom[b])
for b in extendable_links.index])
objective.constant -= (extendable_links.capital_cost * extendable_links.p_nom).zsum()
network.objective_constant = - objective.constant
## Unit commitment costs
objective.variables.extend([(1, model.generator_start_up_cost[gen,sn]) for gen in suc_gens_i for sn in snapshots])
objective.variables.extend([(1, model.generator_shut_down_cost[gen,sn]) for gen in sdc_gens_i for sn in snapshots])
l_objective(model,objective)
def extract_optimisation_results(network, snapshots, formulation="angles", free_pyomo=True,
extra_postprocessing=None):
allocate_series_dataframes(network, {'Generator': ['p'],
'Load': ['p'],
'StorageUnit': ['p', 'state_of_charge', 'spill'],
'Store': ['p', 'e'],
'Bus': ['p', 'v_ang', 'v_mag_pu', 'marginal_price'],
'Line': ['p0', 'p1', 'mu_lower', 'mu_upper'],
'Transformer': ['p0', 'p1', 'mu_lower', 'mu_upper'],
'Link': ["p"+col[3:] for col in network.links.columns if col[:3] == "bus"]
+['mu_lower', 'mu_upper']})
#get value of objective function
network.objective = network.results["Problem"][0]["Upper bound"]
model = network.model
duals = pd.Series(list(model.dual.values()), index=pd.Index(list(model.dual.keys())),
dtype=float)
if free_pyomo:
model.dual.clear()
def clear_indexedvar(indexedvar):
for v in indexedvar._data.values():
v.clear()
def get_values(indexedvar, free=free_pyomo):
s = pd.Series(indexedvar.get_values(), dtype=float)
if free:
clear_indexedvar(indexedvar)
return s
def set_from_series(df, series):
df.loc[snapshots] = series.unstack(0).reindex(columns=df.columns)
def get_shadows(constraint, multiind=True):
if len(constraint) == 0: return pd.Series(dtype=float)
index = list(constraint.keys())
if multiind:
index = pd.MultiIndex.from_tuples(index)
cdata = pd.Series(list(constraint.values()), index=index)
return cdata.map(duals)
if len(network.generators):
set_from_series(network.generators_t.p, get_values(model.generator_p))
if len(network.storage_units):
set_from_series(network.storage_units_t.p,
get_values(model.storage_p_dispatch)
- get_values(model.storage_p_store))
set_from_series(network.storage_units_t.state_of_charge,
get_values(model.state_of_charge))
if (network.storage_units_t.inflow.max() > 0).any():
set_from_series(network.storage_units_t.spill,
get_values(model.storage_p_spill))
network.storage_units_t.spill.fillna(0, inplace=True) #p_spill doesn't exist if inflow=0
if len(network.stores):
set_from_series(network.stores_t.p, get_values(model.store_p))
set_from_series(network.stores_t.e, get_values(model.store_e))
if len(network.loads):
load_p_set = get_switchable_as_dense(network, 'Load', 'p_set', snapshots)
network.loads_t["p"].loc[snapshots] = load_p_set.loc[snapshots]
if len(network.buses):
network.buses_t.p.loc[snapshots] = \
pd.concat({c.name:
c.pnl.p.loc[snapshots].multiply(c.df.sign, axis=1)
.groupby(c.df.bus, axis=1).sum()
for c in network.iterate_components(network.controllable_one_port_components)},
sort=False) \
.sum(level=1) \
.reindex(columns=network.buses_t.p.columns, fill_value=0.)
# passive branches
passive_branches = get_values(model.passive_branch_p)
flow_lower = get_shadows(model.flow_lower)
flow_upper = get_shadows(model.flow_upper)
for c in network.iterate_components(network.passive_branch_components):
set_from_series(c.pnl.p0, passive_branches.loc[c.name])
c.pnl.p1.loc[snapshots] = - c.pnl.p0.loc[snapshots]
set_from_series(c.pnl.mu_lower, flow_lower[c.name])
set_from_series(c.pnl.mu_upper, -flow_upper[c.name])
del flow_lower, flow_upper
# active branches
if len(network.links):
set_from_series(network.links_t.p0, get_values(model.link_p))
efficiency = get_switchable_as_dense(network, 'Link', 'efficiency', snapshots)
network.links_t.p1.loc[snapshots] = - network.links_t.p0.loc[snapshots]*efficiency.loc[snapshots,:]
network.buses_t.p.loc[snapshots] -= (network.links_t.p0.loc[snapshots]
.groupby(network.links.bus0, axis=1).sum()
.reindex(columns=network.buses_t.p.columns, fill_value=0.))
network.buses_t.p.loc[snapshots] -= (network.links_t.p1.loc[snapshots]
.groupby(network.links.bus1, axis=1).sum()
.reindex(columns=network.buses_t.p.columns, fill_value=0.))
#Add any other buses to which the links are attached
for i in [int(col[3:]) for col in network.links.columns if col[:3] == "bus" and col not in ["bus0","bus1"]]:
efficiency = get_switchable_as_dense(network, 'Link', 'efficiency{}'.format(i), snapshots)
p_name = "p{}".format(i)
links = network.links.index[network.links["bus{}".format(i)] != ""]
network.links_t[p_name].loc[snapshots, links] = - network.links_t.p0.loc[snapshots, links]*efficiency.loc[snapshots, links]
network.buses_t.p.loc[snapshots] -= (network.links_t[p_name].loc[snapshots, links]
.groupby(network.links["bus{}".format(i)], axis=1).sum()
.reindex(columns=network.buses_t.p.columns, fill_value=0.))
set_from_series(network.links_t.mu_lower, get_shadows(model.link_p_lower))
set_from_series(network.links_t.mu_upper, - get_shadows(model.link_p_upper))
if len(network.buses):
if formulation in {'angles', 'kirchhoff'}:
set_from_series(network.buses_t.marginal_price,
pd.Series(list(model.power_balance.values()),
index=pd.MultiIndex.from_tuples(list(model.power_balance.keys())))
.map(duals))
#correct for snapshot weightings
network.buses_t.marginal_price.loc[snapshots] = (
network.buses_t.marginal_price.loc[snapshots].divide(
network.snapshot_weightings.objective.loc[snapshots],axis=0))
if formulation == "angles":
set_from_series(network.buses_t.v_ang,
get_values(model.voltage_angles))
elif formulation in ["ptdf","cycles","kirchhoff"]:
for sn in network.sub_networks.obj:
network.buses_t.v_ang.loc[snapshots,sn.slack_bus] = 0.
if len(sn.pvpqs) > 0:
network.buses_t.v_ang.loc[snapshots,sn.pvpqs] = spsolve(sn.B[1:, 1:], network.buses_t.p.loc[snapshots,sn.pvpqs].T).T
network.buses_t.v_mag_pu.loc[snapshots,network.buses.carrier=="AC"] = 1.
network.buses_t.v_mag_pu.loc[snapshots,network.buses.carrier=="DC"] = 1 + network.buses_t.v_ang.loc[snapshots,network.buses.carrier=="DC"]
#now that we've used the angles to calculate the flow, set the DC ones to zero
network.buses_t.v_ang.loc[snapshots,network.buses.carrier=="DC"] = 0.
network.generators.p_nom_opt = network.generators.p_nom
network.generators.loc[network.generators.p_nom_extendable, 'p_nom_opt'] = \
get_values(network.model.generator_p_nom)
network.storage_units.p_nom_opt = network.storage_units.p_nom
network.storage_units.loc[network.storage_units.p_nom_extendable, 'p_nom_opt'] = \
get_values(network.model.storage_p_nom)
network.stores.e_nom_opt = network.stores.e_nom
network.stores.loc[network.stores.e_nom_extendable, 'e_nom_opt'] = \
get_values(network.model.store_e_nom)
s_nom_extendable_passive_branches = get_values(model.passive_branch_s_nom)
for c in network.iterate_components(network.passive_branch_components):
c.df['s_nom_opt'] = c.df.s_nom
if c.df.s_nom_extendable.any():
c.df.loc[c.df.s_nom_extendable, 's_nom_opt'] = s_nom_extendable_passive_branches.loc[c.name]
network.links.p_nom_opt = network.links.p_nom
network.links.loc[network.links.p_nom_extendable, "p_nom_opt"] = \
get_values(network.model.link_p_nom)
try:
network.global_constraints.loc[:,"mu"] = - get_shadows(model.global_constraints, multiind=False)
except (AttributeError, KeyError):
logger.warning("Could not read out global constraint shadow prices")
#extract unit commitment statuses
if network.generators.committable.any():
allocate_series_dataframes(network, {'Generator': ['status']})
fixed_committable_gens_i = network.generators.index[~network.generators.p_nom_extendable & network.generators.committable]
if len(fixed_committable_gens_i) > 0:
network.generators_t.status.loc[snapshots,fixed_committable_gens_i] = \
get_values(model.generator_status).unstack(0)
if extra_postprocessing is not None:
extra_postprocessing(network, snapshots, duals)
def network_lopf_build_model(network, snapshots=None, skip_pre=False,
formulation="angles", ptdf_tolerance=0.):
"""
Build pyomo model for linear optimal power flow for a group of snapshots.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
skip_pre : bool, default False
Skip the preliminary steps of computing topology, calculating
dependent values and finding bus controls.
formulation : string
Formulation of the linear power flow equations to use; must be
one of ["angles","cycles","kirchhoff","ptdf"]
ptdf_tolerance : float
Value below which PTDF entries are ignored
Returns
-------
network.model
"""
if isinstance(network.snapshots, pd.MultiIndex):
raise NotImplementedError("Optimization with multiindexed snapshots "
"using pyomo is not supported.")
if not skip_pre:
network.determine_network_topology()
calculate_dependent_values(network)
for sub_network in network.sub_networks.obj:
find_slack_bus(sub_network)
logger.info("Performed preliminary steps")
snapshots = _as_snapshots(network, snapshots)
logger.info("Building pyomo model using `%s` formulation", formulation)
network.model = ConcreteModel("Linear Optimal Power Flow")
define_generator_variables_constraints(network,snapshots)
define_storage_variables_constraints(network,snapshots)
define_store_variables_constraints(network,snapshots)
define_branch_extension_variables(network,snapshots)
define_link_flows(network,snapshots)
define_nodal_balances(network,snapshots)
define_passive_branch_flows(network,snapshots,formulation,ptdf_tolerance)
define_passive_branch_constraints(network,snapshots)
if formulation in ["angles", "kirchhoff"]:
define_nodal_balance_constraints(network,snapshots)
elif formulation in ["ptdf", "cycles"]:
define_sub_network_balance_constraints(network,snapshots)
define_global_constraints(network,snapshots)
define_linear_objective(network, snapshots)
    #tidy up auxiliary expressions
del network._p_balance
#force solver to also give us the dual prices
network.model.dual = Suffix(direction=Suffix.IMPORT)
return network.model
def network_lopf_prepare_solver(network, solver_name="glpk", solver_io=None):
"""
Prepare solver for linear optimal power flow.
Parameters
----------
solver_name : string
Must be a solver name that pyomo recognises and that is
installed, e.g. "glpk", "gurobi"
solver_io : string, default None
Solver Input-Output option, e.g. "python" to use "gurobipy" for
solver_name="gurobi"
Returns
-------
None
"""
network.opt = SolverFactory(solver_name, solver_io=solver_io)
patch_optsolver_record_memusage_before_solving(network.opt, network)
if isinstance(network.opt, PersistentSolver):
network.opt.set_instance(network.model)
return network.opt
def network_lopf_solve(network, snapshots=None, formulation="angles", solver_options={},solver_logfile=None, keep_files=False,
free_memory={'pyomo'},extra_postprocessing=None):
"""
Solve linear optimal power flow for a group of snapshots and extract results.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
formulation : string
Formulation of the linear power flow equations to use; must be one of
["angles","cycles","kirchhoff","ptdf"]; must match formulation used for
building the model.
solver_options : dictionary
A dictionary with additional options that get passed to the solver.
(e.g. {'threads':2} tells gurobi to use only 2 cpus)
solver_logfile : None|string
If not None, sets the logfile option of the solver.
keep_files : bool, default False
Keep the files that pyomo constructs from OPF problem
construction, e.g. .lp file - useful for debugging
free_memory : set, default {'pyomo'}
Any subset of {'pypsa', 'pyomo'}. Allows to stash `pypsa` time-series
data away while the solver runs (as a pickle to disk) and/or free
`pyomo` data after the solution has been extracted.
extra_postprocessing : callable function
This function must take three arguments
`extra_postprocessing(network,snapshots,duals)` and is called after
the model has solved and the results are extracted. It allows the user to
extract further information about the solution, such as additional shadow prices.
Returns
-------
None
"""
snapshots = _as_snapshots(network, snapshots)
logger.info("Solving model using %s", network.opt.name)
if isinstance(network.opt, PersistentSolver):
args = []
else:
args = [network.model]
if isinstance(free_memory, str):
free_memory = {free_memory}
if 'pypsa' in free_memory:
with empty_network(network):
network.results = network.opt.solve(*args, suffixes=["dual"], keepfiles=keep_files, logfile=solver_logfile, options=solver_options)
else:
network.results = network.opt.solve(*args, suffixes=["dual"], keepfiles=keep_files, logfile=solver_logfile, options=solver_options)
if logger.isEnabledFor(logging.INFO):
network.results.write()
status = network.results["Solver"][0]["Status"]
termination_condition = network.results["Solver"][0]["Termination condition"]
if status == "ok" and termination_condition == "optimal":
logger.info("Optimization successful")
extract_optimisation_results(network, snapshots, formulation,
free_pyomo='pyomo' in free_memory,
extra_postprocessing=extra_postprocessing)
elif status == "warning" and termination_condition == "other":
logger.warning("WARNING! Optimization might be sub-optimal. Writing output anyway")
extract_optimisation_results(network, snapshots, formulation,
free_pyomo='pyomo' in free_memory,
extra_postprocessing=extra_postprocessing)
else:
logger.error("Optimisation failed with status %s and terminal condition %s"
% (status, termination_condition))
return status, termination_condition
def network_lopf(network, snapshots=None, solver_name="glpk", solver_io=None,
skip_pre=False, extra_functionality=None, solver_logfile=None, solver_options={},
keep_files=False, formulation="angles", ptdf_tolerance=0.,
free_memory={},extra_postprocessing=None):
"""
Linear optimal power flow for a group of snapshots.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
solver_name : string
Must be a solver name that pyomo recognises and that is
installed, e.g. "glpk", "gurobi"
solver_io : string, default None
Solver Input-Output option, e.g. "python" to use "gurobipy" for
solver_name="gurobi"
skip_pre : bool, default False
Skip the preliminary steps of computing topology, calculating
dependent values and finding bus controls.
extra_functionality : callable function
This function must take two arguments
`extra_functionality(network,snapshots)` and is called after
the model building is complete, but before it is sent to the
solver. It allows the user to
add/change constraints and add/change the objective function.
solver_logfile : None|string
If not None, sets the logfile option of the solver.
solver_options : dictionary
A dictionary with additional options that get passed to the solver.
(e.g. {'threads':2} tells gurobi to use only 2 cpus)
keep_files : bool, default False
Keep the files that pyomo constructs from OPF problem
construction, e.g. .lp file - useful for debugging
formulation : string
Formulation of the linear power flow equations to use; must be
one of ["angles","cycles","kirchhoff","ptdf"]
ptdf_tolerance : float
Value below which PTDF entries are ignored
free_memory : set, default {'pyomo'}
Any subset of {'pypsa', 'pyomo'}. Allows to stash `pypsa` time-series
data away while the solver runs (as a pickle to disk) and/or free
`pyomo` data after the solution has been extracted.
extra_postprocessing : callable function
This function must take three arguments
`extra_postprocessing(network,snapshots,duals)` and is called after
the model has solved and the results are extracted. It allows the user to
extract further information about the solution, such as additional shadow prices.
Returns
-------
None
"""
snapshots = _as_snapshots(network, snapshots)
network_lopf_build_model(network, snapshots, skip_pre=skip_pre,
formulation=formulation, ptdf_tolerance=ptdf_tolerance)
if extra_functionality is not None:
extra_functionality(network,snapshots)
network_lopf_prepare_solver(network, solver_name=solver_name,
solver_io=solver_io)
return network_lopf_solve(network, snapshots, formulation=formulation,
solver_logfile=solver_logfile, solver_options=solver_options,
keep_files=keep_files, free_memory=free_memory,
extra_postprocessing=extra_postprocessing)
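# A minimal usage sketch of the wrapper defined above, kept as comments so that
# importing this module stays free of side effects. The network file name
# "example.nc" is a placeholder, not a file shipped with PyPSA:
#
#     import pypsa
#     network = pypsa.Network("example.nc")        # placeholder input file
#     status, condition = network_lopf(network,
#                                      snapshots=network.snapshots,
#                                      solver_name="glpk",
#                                      formulation="angles")
#     print(status, condition)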
|
gpl-3.0
|
bthirion/nipy
|
tools/run_log_examples.py
|
4
|
6007
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function, with_statement
DESCRIP = 'Run and log examples'
EPILOG = \
""" Run examples in directory
Typical usage is:
run_log_examples.py nipy/examples --log-path=~/tmp/eg_logs
to run the examples and log the result, or
run_log_examples.py nipy/examples/some_example.py
to run a single example.
"""
import sys
import os
from os.path import (abspath, expanduser, join as pjoin, sep as psep, isfile,
dirname)
from subprocess import Popen, PIPE
import re
from nibabel.py3k import asstr
from nipy.externals.argparse import (ArgumentParser,
RawDescriptionHelpFormatter)
PYTHON=sys.executable
NEED_SHELL = True
class ProcLogger(object):
def __init__(self, log_path, working_path):
self.log_path = log_path
self.working_path = working_path
self._names = []
def cmd_str_maker(self, cmd, args):
return " ".join([cmd] + list(args))
def __call__(self, cmd_name, cmd, args=(), cwd=None):
        # Make log files
if cmd_name in self._names:
raise ValueError('Command name {0} not unique'.format(cmd_name))
self._names.append(cmd_name)
if cwd is None:
cwd = self.working_path
cmd_out_path = pjoin(self.log_path, cmd_name)
stdout_log = open(cmd_out_path + '.stdout', 'wt')
stderr_log = open(cmd_out_path + '.stderr', 'wt')
try:
# Start subprocess
cmd_str = self.cmd_str_maker(cmd, args)
proc = Popen(cmd_str,
cwd = cwd,
stdout = stdout_log,
stderr = stderr_log,
shell = NEED_SHELL)
# Execute
retcode = proc.wait()
finally:
if proc.poll() is None: # In case we get killed
proc.terminate()
stdout_log.close()
stderr_log.close()
return retcode
def run_pipes(self, cmd, args=(), cwd=None):
if cwd is None:
cwd = self.working_path
try:
# Start subprocess
cmd_str = self.cmd_str_maker(cmd, args)
proc = Popen(cmd_str,
cwd = cwd,
stdout = PIPE,
stderr = PIPE,
shell = NEED_SHELL)
# Execute
stdout, stderr = proc.communicate()
finally:
if proc.poll() is None: # In case we get killed
proc.terminate()
return asstr(stdout), asstr(stderr), proc.returncode
class PyProcLogger(ProcLogger):
def cmd_str_maker(self, cmd, args):
""" Execute python script `cmd`
Reject any `args` because we're using ``exec`` to execute the script.
Prepend some matplotlib setup to suppress figures
"""
if len(args) != 0:
raise ValueError("Cannot use args with {8}".format(self.__class__))
return("""{0} -c "import matplotlib as mpl; mpl.use('agg'); """
"""exec(open('{1}', 'rt').read())" """.format(PYTHON, cmd))
def _record(result, fname, fileobj):
print(result)
fileobj.write('{0}: {1}\n'.format(fname, result))
def main():
parser = ArgumentParser(description=DESCRIP,
epilog=EPILOG,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('examples_path', type=str,
help='filename of example or directory containing '
'examples to run')
parser.add_argument('--log-path', type=str, default='',
help='path for output logs (default is cwd)')
parser.add_argument('--excludex', type=str, action='append', default=[],
help='regex for files to exclude (add more than one '
                        '--excludex option for more than one regex filter)')
args = parser.parse_args()
# Proc runner
eg_path = abspath(expanduser(args.examples_path))
if args.log_path == '':
log_path = abspath(os.getcwd())
else:
log_path = abspath(expanduser(args.log_path))
excludexes = [re.compile(s) for s in args.excludex]
if isfile(eg_path): # example was a file
proc_logger = PyProcLogger(log_path=log_path,
working_path=dirname(eg_path))
print("Running " + eg_path)
stdout, stderr, code = proc_logger.run_pipes(eg_path)
print('==== Stdout ====')
print(stdout)
print('==== Stderr ====')
print(stderr)
sys.exit(code)
# Multi-run with logging to file
proc_logger = PyProcLogger(log_path=log_path,
working_path=eg_path)
fails = 0
with open(pjoin(log_path, 'summary.txt'), 'wt') as f:
for dirpath, dirnames, filenames in os.walk(eg_path):
for fname in filenames:
full_fname = pjoin(dirpath, fname)
if fname.endswith(".py"):
print(fname, end=': ')
sys.stdout.flush()
for excludex in excludexes:
if excludex.search(fname):
_record('SKIP', fname, f)
break
else: # run test
cmd_name = full_fname.replace(eg_path + psep, '')
cmd_name = cmd_name.replace(psep, '-')
code = proc_logger(cmd_name, full_fname, cwd=dirpath)
if code == 0:
_record('OK', fname, f)
else:
fails += 1
_record('FAIL', fname, f)
sys.exit(fails if fails < 255 else 255)
if __name__ == '__main__':
main()
|
bsd-3-clause
|
austinban/ai-final-project
|
grading/neuralNet-submissions.py
|
4
|
2217
|
import importlib
import traceback
from grading.util import roster, print_table
# from logic import FolKB
# from utils import expr
import os
from sklearn.neural_network import MLPClassifier
mlpc = MLPClassifier()
def indent(howMuch = 1):
space = ' '
for i in range(1, howMuch):
space += ' '
return space
def tryOne(label, fAndP):
frame = fAndP['frame']
if 'mlpc' in fAndP.keys():
clf = fAndP['mlpc']
else:
clf = mlpc
try:
fit = clf.fit(frame.data, frame.target)
except:
pass
print('')
# print_table(fit.theta_,
# header=[frame.feature_names],
# topLeft=[label],
# leftColumn=frame.target_names,
# numfmt='%6.3f',
# njust='center',
# tjust='rjust',
# )
y_pred = fit.predict(frame.data)
print("Number of mislabeled points out of a total %d points : %d"
% (len(frame.data), (frame.target != y_pred).sum()))
def tryExamples(examples):
for label in examples:
example = examples[label]
main = getattr(example, 'main', None)
if main != None:
example.main()
else:
tryOne(label, example)
submissions = {}
scores = {}
message1 = 'Submissions that compile:'
root = os.getcwd()
for student in roster:
try:
os.chdir(root + '/submissions/' + student)
# http://stackoverflow.com/a/17136796/2619926
mod = importlib.import_module('submissions.' + student + '.myNN')
submissions[student] = mod.Examples
message1 += ' ' + student
except ImportError:
pass
except:
traceback.print_exc()
os.chdir(root)
print(message1)
print('----------------------------------------')
for student in roster:
if not student in submissions.keys():
continue
scores[student] = []
try:
examples = submissions[student]
        print('Neural Networks from:', student)
tryExamples(examples)
except:
traceback.print_exc()
print(student + ' scores ' + str(scores[student]) + ' = ' + str(sum(scores[student])))
print('----------------------------------------')
|
mit
|
CompPhysics/MachineLearning
|
doc/src/LectureNotes/_build/jupyter_execute/chapter6.py
|
1
|
92864
|
# Neural networks, from the simple perceptron to deep learning
## To do list
* write code for single perceptron model and make link with linear regression
* revise initial info and add references
* Update tensorflow material, with keras
* think of adding material about pytorch
* rework pulsar example and breast cancer example
* add ising model example for both regression and classification
* make data on gravitational problem, add reference to articles on uncovering physical laws from ML
* think of genetic data
## Neural networks
Artificial neural networks are computational systems that can learn to
perform tasks by considering examples, generally without being
programmed with any task-specific rules. It is supposed to mimic a
biological system, wherein neurons interact by sending signals in the
form of mathematical functions between layers. All layers can contain
an arbitrary number of neurons, and each connection is represented by
a weight variable.
## Artificial neurons
The field of artificial neural networks has a long history of
development, and is closely connected with the advancement of computer
science and computers in general. A model of artificial neurons was
first developed by McCulloch and Pitts in 1943 to study signal
processing in the brain and has later been refined by others. The
general idea is to mimic neural networks in the human brain, which is
composed of billions of neurons that communicate with each other by
sending electrical signals. Each neuron accumulates its incoming
signals, which must exceed an activation threshold to yield an
output. If the threshold is not overcome, the neuron remains inactive,
i.e. has zero output.
This behaviour has inspired a simple mathematical model for an artificial neuron.
<!-- Equation labels as ordinary links -->
<div id="artificialNeuron"></div>
$$
\begin{equation}
y = f\left(\sum_{i=1}^n w_ix_i\right) = f(u)
\label{artificialNeuron} \tag{1}
\end{equation}
$$
Here, the output $y$ of the neuron is the value of its activation function, which has as input a weighted sum of signals $x_1, \dots ,x_n$ received from $n$ other neurons, as illustrated by the short code below.
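The sketch evaluates Eq. (1) for a single neuron with a sigmoid activation; the weights and input signals are arbitrary numbers chosen only for illustration.
import numpy as np
def neuron_output(x, w, f=lambda u: 1.0/(1.0 + np.exp(-u))):
    # output y = f(sum_i w_i x_i) of a single artificial neuron, Eq. (1)
    return f(np.dot(w, x))
x = np.array([0.5, -1.2, 3.0])   # signals x_1,...,x_n from n = 3 other neurons (arbitrary)
w = np.array([0.1, 0.4, -0.2])   # corresponding weights (arbitrary)
print(neuron_output(x, w))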
Conceptually, it is helpful to divide neural networks into four
categories:
1. general purpose neural networks for supervised learning,
2. neural networks designed specifically for image processing, the most prominent example of this class being Convolutional Neural Networks (CNNs),
3. neural networks for sequential data such as Recurrent Neural Networks (RNNs), and
4. neural networks for unsupervised learning such as Deep Boltzmann Machines.
In natural science, DNNs and CNNs have already found numerous
applications. In statistical physics, they have been applied to detect
phase transitions in 2D Ising and Potts models, lattice gauge
theories, and different phases of polymers, or solving the
Navier-Stokes equation in weather forecasting. Deep learning has also
found interesting applications in quantum physics. Various quantum
phase transitions can be detected and studied using DNNs and CNNs,
topological phases, and even non-equilibrium many-body
localization. Representing quantum states with DNNs and performing quantum state tomography are among the impressive achievements that reveal the potential of DNNs to facilitate the study of quantum systems.
In quantum information theory, it has been shown that one can perform
gate decompositions with the help of neural.
The applications are not limited to the natural sciences. There is a
plethora of applications in essentially all disciplines, from the
humanities to life science and medicine.
## Neural network types
An artificial neural network (ANN) is a computational model that
consists of layers of connected neurons, or nodes or units. We will
refer to these interchangeably as units or nodes, and sometimes as
neurons.
It is supposed to mimic a biological nervous system by letting each
neuron interact with other neurons by sending signals in the form of
mathematical functions between layers. A wide variety of different
ANNs have been developed, but most of them consist of an input layer,
an output layer and eventual layers in-between, called *hidden
layers*. All layers can contain an arbitrary number of nodes, and each
connection between two nodes is associated with a weight variable.
Neural networks (also called neural nets) are neural-inspired
nonlinear models for supervised learning. As we will see, neural nets
can be viewed as natural, more powerful extensions of supervised
learning methods such as linear and logistic regression and soft-max
methods we discussed earlier.
## Feed-forward neural networks
The feed-forward neural network (FFNN) was the first and simplest type
of ANNs that were devised. In this network, the information moves in
only one direction: forward through the layers.
Nodes are represented by circles, while the arrows display the
connections between the nodes, including the direction of information
flow. Additionally, each arrow corresponds to a weight variable
(figure to come). We observe that each node in a layer is connected
to *all* nodes in the subsequent layer, making this a so-called
*fully-connected* FFNN.
## Convolutional Neural Network
A different variant of FFNNs are *convolutional neural networks*
(CNNs), which have a connectivity pattern inspired by the animal
visual cortex. Individual neurons in the visual cortex only respond to
stimuli from small sub-regions of the visual field, called a receptive
field. This makes the neurons well-suited to exploit the strong
spatially local correlation present in natural images. The response of
each neuron can be approximated mathematically as a convolution
operation. (figure to come)
Convolutional neural networks emulate the behaviour of neurons in the
visual cortex by enforcing a *local* connectivity pattern between
nodes of adjacent layers: Each node in a convolutional layer is
connected only to a subset of the nodes in the previous layer, in
contrast to the fully-connected FFNN. Often, CNNs consist of several
convolutional layers that learn local features of the input, with a
fully-connected layer at the end, which gathers all the local data and
produces the outputs. They have wide applications in image and video
recognition.
## Recurrent neural networks
So far we have only mentioned ANNs where information flows in one
direction: forward. *Recurrent neural networks* on the other hand,
have connections between nodes that form directed *cycles*. This
creates a form of internal memory which is able to capture
information on what has been calculated before; the output is
dependent on the previous computations. Recurrent NNs make use of
sequential information by performing the same task for every element
in a sequence, where each element depends on previous elements. An
example of such information is sentences, making recurrent NNs
especially well-suited for handwriting and speech recognition.
## Other types of networks
There are many other kinds of ANNs that have been developed. One type
that is specifically designed for interpolation in multidimensional
space is the radial basis function (RBF) network. RBFs are typically
made up of three layers: an input layer, a hidden layer with
non-linear radial symmetric activation functions and a linear output
layer (''linear'' here means that each node in the output layer has a
linear activation function). The layers are normally fully-connected
and there are no cycles, thus RBFs can be viewed as a type of
fully-connected FFNN. They are however usually treated as a separate
type of NN due to the unusual activation functions.
## Multilayer perceptrons
One uses often so-called fully-connected feed-forward neural networks
with three or more layers (an input layer, one or more hidden layers
and an output layer) consisting of neurons that have non-linear
activation functions.
Such networks are often called *multilayer perceptrons* (MLPs).
## Why multilayer perceptrons?
According to the *Universal approximation theorem*, a feed-forward
neural network with just a single hidden layer containing a finite
number of neurons can approximate a continuous multidimensional
function to arbitrary accuracy, assuming the activation function for
the hidden layer is a **non-constant, bounded and
monotonically-increasing continuous function**.
Note that the requirements on the activation function apply only to the hidden layer; the output nodes are always assumed to be linear, so as not to restrict the range of output values.
## Mathematical model
The output $y$ is produced via the activation function $f$
$$
y = f\left(\sum_{i=1}^n w_ix_i + b_i\right) = f(z),
$$
This function receives $x_i$ as inputs.
Here the activation $z=(\sum_{i=1}^n w_ix_i+b_i)$.
In an FFNN of such neurons, the *inputs* $x_i$ are the *outputs* of
the neurons in the preceding layer. Furthermore, an MLP is
fully-connected, which means that each neuron receives a weighted sum
of the outputs of *all* neurons in the previous layer.
## Mathematical model
First, for each node $i$ in the first hidden layer, we calculate a weighted sum $z_i^1$ of the input coordinates $x_j$,
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation} z_i^1 = \sum_{j=1}^{M} w_{ij}^1 x_j + b_i^1
\label{_auto1} \tag{2}
\end{equation}
$$
Here $b_i$ is the so-called bias which is normally needed in
case of zero activation weights or inputs. How to fix the biases and
the weights will be discussed below. The value of $z_i^1$ is the
argument to the activation function $f_i$ of each node $i$, The
variable $M$ stands for all possible inputs to a given node $i$ in the
first layer. We define the output $y_i^1$ of all neurons in layer 1 as
<!-- Equation labels as ordinary links -->
<div id="outputLayer1"></div>
$$
\begin{equation}
y_i^1 = f(z_i^1) = f\left(\sum_{j=1}^M w_{ij}^1 x_j + b_i^1\right)
\label{outputLayer1} \tag{3}
\end{equation}
$$
where we assume that all nodes in the same layer have identical
activation functions, hence the notation $f$. In general, we could assume in the more general case that different layers have different activation functions.
In this case we would identify these functions with a superscript $l$ for the $l$-th layer,
<!-- Equation labels as ordinary links -->
<div id="generalLayer"></div>
$$
\begin{equation}
y_i^l = f^l(u_i^l) = f^l\left(\sum_{j=1}^{N_{l-1}} w_{ij}^l y_j^{l-1} + b_i^l\right)
\label{generalLayer} \tag{4}
\end{equation}
$$
where $N_l$ is the number of nodes in layer $l$. When the outputs of all the nodes in the first hidden layer have been computed, the values of the subsequent layer can be calculated, and so forth, until the output is obtained, as the short code below illustrates for the first hidden layer.
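Here the first hidden layer outputs $y_i^1$ of Eqs. (2) and (3) are evaluated with plain loops; the inputs, weights and biases are random numbers chosen only for illustration.
import numpy as np
np.random.seed(0)
M = 4                          # number of inputs x_j
N1 = 3                         # number of nodes in the first hidden layer
x = np.random.randn(M)         # inputs x_j
w1 = np.random.randn(N1, M)    # weights w_{ij}^1
b1 = np.random.randn(N1)       # biases b_i^1
def f(z):                      # sigmoid activation
    return 1.0/(1.0 + np.exp(-z))
y1 = np.zeros(N1)
for i in range(N1):
    z_i1 = sum(w1[i, j]*x[j] for j in range(M)) + b1[i]   # Eq. (2)
    y1[i] = f(z_i1)                                       # Eq. (3)
print(y1)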
## Mathematical model
The output of neuron $i$ in layer 2 is thus,
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
y_i^2 = f^2\left(\sum_{j=1}^N w_{ij}^2 y_j^1 + b_i^2\right)
\label{_auto2} \tag{5}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="outputLayer2"></div>
$$
\begin{equation}
= f^2\left[\sum_{j=1}^N w_{ij}^2f^1\left(\sum_{k=1}^M w_{jk}^1 x_k + b_j^1\right) + b_i^2\right]
\label{outputLayer2} \tag{6}
\end{equation}
$$
where we have inserted the expression for $y_j^1$ in terms of the inputs $x_k$. Finally, the ANN output reads
<!-- Equation labels as ordinary links -->
<div id="_auto3"></div>
$$
\begin{equation}
y_i^3 = f^3\left(\sum_{j=1}^N w_{ij}^3 y_j^2 + b_i^3\right)
\label{_auto3} \tag{7}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto4"></div>
$$
\begin{equation}
 = f^3\left[\sum_{j} w_{ij}^3 f^2\left(\sum_{k} w_{jk}^2 f^1\left(\sum_{m} w_{km}^1 x_m + b_k^1\right) + b_j^2\right)
 + b_i^3\right]
\label{_auto4} \tag{8}
\end{equation}
$$
## Mathematical model
We can generalize this expression to an MLP with $l$ hidden
layers. The complete functional form is,
<!-- Equation labels as ordinary links -->
<div id="completeNN"></div>
$$
\begin{equation}
y^{l+1}_i = f^{l+1}\left[\!\sum_{j=1}^{N_l} w_{ij}^{l+1} f^l\left(\sum_{k=1}^{N_{l-1}}w_{jk}^{l}\left(\dots f^1\left(\sum_{n=1}^{N_0} w_{mn}^1 x_n+ b_m^1\right)\dots\right)+b_j^{l}\right)+b_i^{l+1}\right]
\label{completeNN} \tag{9}
\end{equation}
$$
which illustrates a basic property of MLPs: The only independent
variables are the input values $x_n$.
## Mathematical model
This confirms that an MLP, despite its quite convoluted mathematical
form, is nothing more than an analytic function, specifically a
mapping of real-valued vectors $\hat{x} \in \mathbb{R}^n \rightarrow
\hat{y} \in \mathbb{R}^m$.
Furthermore, the flexibility and universality of an MLP can be
illustrated by realizing that the expression is essentially a nested
sum of scaled activation functions of the form
<!-- Equation labels as ordinary links -->
<div id="_auto5"></div>
$$
\begin{equation}
f(x) = c_1 f(c_2 x + c_3) + c_4
\label{_auto5} \tag{10}
\end{equation}
$$
where the parameters $c_i$ are weights and biases. By adjusting these
parameters, the activation functions can be shifted up and down or
left and right, change slope or be rescaled which is the key to the
flexibility of a neural network.
### Matrix-vector notation
We can introduce a more convenient notation for the activations in an ANN.
Additionally, we can represent the biases and activations
as layer-wise column vectors $\hat{b}_l$ and $\hat{y}_l$, so that the $i$-th element of each vector
is the bias $b_i^l$ and activation $y_i^l$ of node $i$ in layer $l$ respectively.
We have that $\mathrm{W}_l$ is an $N_l \times N_{l-1}$ matrix, while $\hat{b}_l$ and $\hat{y}_l$ are $N_l \times 1$ column vectors.
With this notation, the sum becomes a matrix-vector multiplication, and we can write
the equation for the activations of hidden layer 2 (assuming three nodes for simplicity) as
<!-- Equation labels as ordinary links -->
<div id="_auto6"></div>
$$
\begin{equation}
\hat{y}_2 = f_2(\mathrm{W}_2 \hat{y}_{1} + \hat{b}_{2}) =
f_2\left(\left[\begin{array}{ccc}
w^2_{11} &w^2_{12} &w^2_{13} \\
w^2_{21} &w^2_{22} &w^2_{23} \\
w^2_{31} &w^2_{32} &w^2_{33} \\
\end{array} \right] \cdot
\left[\begin{array}{c}
y^1_1 \\
y^1_2 \\
y^1_3 \\
\end{array}\right] +
\left[\begin{array}{c}
b^2_1 \\
b^2_2 \\
b^2_3 \\
\end{array}\right]\right).
\label{_auto6} \tag{11}
\end{equation}
$$
### Matrix-vector notation and activation
The activation of node $i$ in layer 2 is
<!-- Equation labels as ordinary links -->
<div id="_auto7"></div>
$$
\begin{equation}
 y^2_i = f_2\Bigl(w^2_{i1}y^1_1 + w^2_{i2}y^1_2 + w^2_{i3}y^1_3 + b^2_i\Bigr) =
f_2\left(\sum_{j=1}^3 w^2_{ij} y_j^1 + b^2_i\right).
\label{_auto7} \tag{12}
\end{equation}
$$
This is not just a convenient and compact notation, but also a useful
and intuitive way to think about MLPs: The output is calculated by a
series of matrix-vector multiplications and vector additions that are
used as input to the activation functions. For each operation
$\mathrm{W}_l \hat{y}_{l-1}$ we move forward one layer.
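The following small sketch carries out exactly this sequence of matrix-vector multiplications, vector additions and element-wise activations for a network with two hidden layers; the layer sizes and the randomly drawn weights, biases and input are arbitrary choices used only for illustration.
import numpy as np
np.random.seed(0)
sizes = [4, 3, 3, 2]   # input layer, two hidden layers, output layer (arbitrary sizes)
# W_l has shape N_l x N_{l-1}, b_l has length N_l
W = [np.random.randn(m, n) for n, m in zip(sizes[:-1], sizes[1:])]
b = [np.random.randn(m) for m in sizes[1:]]
def f(z):              # the same sigmoid activation in every layer
    return 1.0/(1.0 + np.exp(-z))
y = np.random.randn(sizes[0])      # input vector \hat{x} (arbitrary)
for W_l, b_l in zip(W, b):
    y = f(W_l @ y + b_l)           # \hat{y}_l = f(W_l \hat{y}_{l-1} + \hat{b}_l)
print(y)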
### Activation functions
A property that characterizes a neural network, other than its
connectivity, is the choice of activation function(s). As described earlier, the following restrictions are imposed on an activation function for an FFNN to fulfill the universal approximation theorem:
* Non-constant
* Bounded
* Monotonically-increasing
* Continuous
### Activation functions, Logistic and Hyperbolic ones
The second requirement excludes all linear functions. Furthermore, in
an MLP with only linear activation functions, each layer simply
performs a linear transformation of its inputs.
Regardless of the number of layers, the output of the NN will be
nothing but a linear function of the inputs. Thus we need to introduce
some kind of non-linearity to the NN to be able to fit non-linear
functions. Typical examples are the logistic *Sigmoid*
$$
f(x) = \frac{1}{1 + e^{-x}},
$$
and the *hyperbolic tangent* function
$$
f(x) = \tanh(x)
$$
### Relevance
The *sigmoid* function is more biologically plausible because the output of inactive neurons is zero. Such activation functions are called *one-sided*. However, it has been shown that the hyperbolic tangent performs better than the sigmoid for training MLPs, and the rectified linear unit (ReLU), plotted below, has become the most popular choice for *deep neural networks*.
%matplotlib inline
"""The sigmoid function (or the logistic curve) is a
function that takes any real number, z, and outputs a number in (0,1).
It is useful in neural networks for assigning weights on a relative scale.
The value z is the weighted sum of parameters involved in the learning algorithm."""
import numpy
import matplotlib.pyplot as plt
import math as mt
z = numpy.arange(-5, 5, .1)
sigma_fn = numpy.vectorize(lambda z: 1/(1+numpy.exp(-z)))
sigma = sigma_fn(z)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, sigma)
ax.set_ylim([-0.1, 1.1])
ax.set_xlim([-5,5])
ax.grid(True)
ax.set_xlabel('z')
ax.set_title('sigmoid function')
plt.show()
"""Step Function"""
z = numpy.arange(-5, 5, .02)
step_fn = numpy.vectorize(lambda z: 1.0 if z >= 0.0 else 0.0)
step = step_fn(z)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, step)
ax.set_ylim([-0.5, 1.5])
ax.set_xlim([-5,5])
ax.grid(True)
ax.set_xlabel('z')
ax.set_title('step function')
plt.show()
"""Sine Function"""
z = numpy.arange(-2*mt.pi, 2*mt.pi, 0.1)
t = numpy.sin(z)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, t)
ax.set_ylim([-1.0, 1.0])
ax.set_xlim([-2*mt.pi,2*mt.pi])
ax.grid(True)
ax.set_xlabel('z')
ax.set_title('sine function')
plt.show()
"""Plots a graph of the squashing function used by a rectified linear
unit"""
z = numpy.arange(-2, 2, .1)
zero = numpy.zeros(len(z))
y = numpy.max([zero, z], axis=0)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, y)
ax.set_ylim([-2.0, 2.0])
ax.set_xlim([-2.0, 2.0])
ax.grid(True)
ax.set_xlabel('z')
ax.set_title('Rectified linear unit')
plt.show()
## The multilayer perceptron (MLP)
The multilayer perceptron is a very popular, and easy to implement, approach to deep learning. It consists of
1. A neural network with one or more layers of nodes between the input and the output nodes.
2. The multilayer network structure, or architecture, or topology, consists of an input layer, one or more hidden layers, and one output layer.
3. The input nodes pass values to the first hidden layer, its nodes pass the information on to the second and so on till we reach the output layer.
As a convention it is normal to call a network with one layer of input units, one layer of hidden units and one layer of output units a two-layer network. A network with two layers of hidden units is called a three-layer network, and so on.
For an MLP network there is no direct connection between the output nodes/neurons/units and the input nodes/neurons/units.
Hereafter we will call the various entities of a layer for nodes.
There are also no connections within a single layer.
The number of input nodes does not need to equal the number of output
nodes. This applies also to the hidden layers. Each layer may have its
own number of nodes and activation functions.
The hidden layers take their name from the fact that they are not linked to observables. As we will see below when we define the so-called activation $\hat{z}$, we can think of the hidden layers as performing a basis expansion of the original inputs $\hat{x}$. The difference, however, between neural networks and, say, linear regression is that now these
basis functions (which will correspond to the weights in the network)
are learned from data. This results in an important difference between
neural networks and deep learning approaches on one side and methods
like logistic regression or linear regression and their modifications on the other side.
## From one to many layers, the universal approximation theorem
A neural network with only one layer, what we called the simple
perceptron, is best suited if we have a standard binary model with
clear (linear) boundaries between the outcomes. As such it could
equally well be replaced by standard linear regression or logistic
regression. Networks with one or more hidden layers approximate
systems with more complex boundaries.
As stated earlier,
an important theorem in studies of neural networks, restated without
proof here, is the [universal approximation
theorem](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.441.7873&rep=rep1&type=pdf).
It states that a feed-forward network with a single hidden layer
containing a finite number of neurons can approximate continuous
functions on compact subsets of real functions. The theorem thus
states that simple neural networks can represent a wide variety of
interesting functions when given appropriate parameters. It is the
multilayer feedforward architecture itself which gives neural networks
the potential of being universal approximators.
## Deriving the back propagation code for a multilayer perceptron model
**Note: figures will be inserted later!**
As we have seen now in a feed forward network, we can express the final output of our network in terms of basic matrix-vector multiplications.
The unknown quantities are our weights $w_{ij}$, and we need to find an algorithm for changing them so that our errors are as small as possible.
This leads us to the famous [back propagation algorithm](https://www.nature.com/articles/323533a0).
The questions we want to ask are how do changes in the biases and the
weights in our network change the cost function and how can we use the
final output to modify the weights?
To derive these equations let us start with a plain regression problem
and define our cost function as
$$
{\cal C}(\hat{W}) = \frac{1}{2}\sum_{i=1}^n\left(y_i - t_i\right)^2,
$$
where the $t_i$s are our $n$ targets (the values we want to
reproduce), while the outputs of the network after having propagated
all inputs $\hat{x}$ are given by $y_i$. Below we will demonstrate
how the basic equations arising from the back propagation algorithm
can be modified in order to study classification problems with $K$
classes.
## Definitions
With our definition of the targets $\hat{t}$, the outputs of the
network $\hat{y}$ and the inputs $\hat{x}$ we
define now the activation $z_j^l$ of node/neuron/unit $j$ of the
$l$-th layer as a function of the bias, the weights which add up from
the previous layer $l-1$ and the forward passes/outputs
$\hat{a}^{l-1}$ from the previous layer as
$$
z_j^l = \sum_{i=1}^{M_{l-1}}w_{ij}^la_i^{l-1}+b_j^l,
$$
where $b_j^l$ are the biases of layer $l$. Here $M_{l-1}$
represents the total number of nodes/neurons/units of layer $l-1$. The
figure here illustrates this equation. We can rewrite this in a more
compact form as the matrix-vector products we discussed earlier,
$$
\hat{z}^l = \left(\hat{W}^l\right)^T\hat{a}^{l-1}+\hat{b}^l.
$$
With the activation values $\hat{z}^l$ we can in turn define the
output of layer $l$ as $\hat{a}^l = f(\hat{z}^l)$ where $f$ is our
activation function. In the examples here we will use the sigmoid
function discussed in our logistic regression lectures. We will also use the same activation function $f$ for all layers
and their nodes. It means we have
$$
a_j^l = f(z_j^l) = \frac{1}{1+\exp{(-z_j^l)}}.
$$
## Derivatives and the chain rule
From the definition of the activation $z_j^l$ we have
$$
\frac{\partial z_j^l}{\partial w_{ij}^l} = a_i^{l-1},
$$
and
$$
\frac{\partial z_j^l}{\partial a_i^{l-1}} = w_{ij}^l.
$$
With our definition of the activation function we have that (note that this function depends only on $z_j^l$)
$$
\frac{\partial a_j^l}{\partial z_j^{l}} = a_j^l(1-a_j^l)=f(z_j^l)(1-f(z_j^l)).
$$
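A quick numerical sanity check of the identity $f'(z)=f(z)(1-f(z))$ for the sigmoid, comparing the analytic expression with a central finite-difference estimate (the grid and step size are arbitrary choices for illustration):
import numpy as np
def f(z):
    return 1.0/(1.0 + np.exp(-z))
z = np.linspace(-5, 5, 11)
analytic = f(z)*(1.0 - f(z))                 # f'(z) = f(z)(1 - f(z))
h = 1e-6
numeric = (f(z + h) - f(z - h))/(2.0*h)      # central finite difference
print(np.max(np.abs(analytic - numeric)))    # difference should be tiny (round-off plus O(h^2) truncation)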
## Derivative of the cost function
With these definitions we can now compute the derivative of the cost function in terms of the weights.
Let us specialize to the output layer $l=L$. Our cost function is
$$
{\cal C}(\hat{W^L}) = \frac{1}{2}\sum_{i=1}^n\left(y_i - t_i\right)^2=\frac{1}{2}\sum_{i=1}^n\left(a_i^L - t_i\right)^2,
$$
The derivative of this function with respect to the weights is
$$
\frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \left(a_j^L - t_j\right)\frac{\partial a_j^L}{\partial w_{jk}^{L}},
$$
The last partial derivative can easily be computed and reads (by applying the chain rule)
$$
\frac{\partial a_j^L}{\partial w_{jk}^{L}} = \frac{\partial a_j^L}{\partial z_{j}^{L}}\frac{\partial z_j^L}{\partial w_{jk}^{L}}=a_j^L(1-a_j^L)a_k^{L-1},
$$
## Bringing it together, first back propagation equation
We have thus
$$
\frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \left(a_j^L - t_j\right)a_j^L(1-a_j^L)a_k^{L-1},
$$
Defining
$$
\delta_j^L = a_j^L(1-a_j^L)\left(a_j^L - t_j\right) = f'(z_j^L)\frac{\partial {\cal C}}{\partial (a_j^L)},
$$
and using the Hadamard product of two vectors we can write this as
$$
\hat{\delta}^L = f'(\hat{z}^L)\circ\frac{\partial {\cal C}}{\partial (\hat{a}^L)}.
$$
This is an important expression. The second term on the right-hand side
measures how fast the cost function is changing as a function of the $j$th
output activation. If, for example, the cost function doesn't depend
much on a particular output node $j$, then $\delta_j^L$ will be small,
which is what we would expect. The first term on the right measures
how fast the activation function $f$ is changing at a given activation
value $z_j^L$.
Notice that everything in the above equations is easily computed. In
particular, we compute $z_j^L$ while computing the behaviour of the
network, and it is only a small additional overhead to compute
$f'(z^L_j)$. The exact form of the derivative with respect to the
output depends on the form of the cost function.
However, provided the cost function is known there should be little
trouble in calculating
$$
\frac{\partial {\cal C}}{\partial (a_j^L)}
$$
With the definition of $\delta_j^L$ we have a more compact definition of the derivative of the cost function in terms of the weights, namely
$$
\frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \delta_j^La_k^{L-1}.
$$
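As a minimal sketch (with hypothetical sizes: four nodes in layer $L-1$, three output nodes, and random weights), the output error $\hat{\delta}^L$ and the gradients with respect to the output weights and biases can be computed as follows for the quadratic cost.

import numpy as np

def sigmoid(z):
    return 1.0/(1.0 + np.exp(-z))

rng = np.random.default_rng(1)
a_prev = rng.normal(size=4)          # a^{L-1}, four hypothetical hidden nodes
W_L = rng.normal(size=(4, 3))        # weights to three output nodes
b_L = rng.normal(size=3)
t = np.array([0.0, 1.0, 0.0])        # targets

z_L = W_L.T @ a_prev + b_L
a_L = sigmoid(z_L)

# delta^L = f'(z^L) o (a^L - t), with f'(z) = a(1-a) for the sigmoid
delta_L = a_L*(1.0 - a_L)*(a_L - t)

# gradient elements delta_j^L a_k^{L-1}, stored in the same (previous, current) layout as W_L
grad_W = np.outer(a_prev, delta_L)
grad_b = delta_L                      # dC/db_j^L = delta_j^L
print(grad_W.shape, grad_b)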
## Derivatives in terms of $z_j^L$
It is also easy to see that our previous equation can be written as
$$
\delta_j^L =\frac{\partial {\cal C}}{\partial z_j^L}= \frac{\partial {\cal C}}{\partial a_j^L}\frac{\partial a_j^L}{\partial z_j^L},
$$
which can also be interpreted as the partial derivative of the cost function with respect to the biases $b_j^L$, namely
$$
\delta_j^L = \frac{\partial {\cal C}}{\partial b_j^L}\frac{\partial b_j^L}{\partial z_j^L}=\frac{\partial {\cal C}}{\partial b_j^L},
$$
That is, the error $\delta_j^L$ is exactly equal to the rate of change of the cost function as a function of the bias.
## Bringing it together
We now have three equations that are essential for computing the derivatives of the cost function at the output layer. These equations are needed to start the algorithm and they are
**The starting equations.**
<!-- Equation labels as ordinary links -->
<div id="_auto8"></div>
$$
\begin{equation}
\frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \delta_j^La_k^{L-1},
\label{_auto8} \tag{13}
\end{equation}
$$
and
<!-- Equation labels as ordinary links -->
<div id="_auto9"></div>
$$
\begin{equation}
\delta_j^L = f'(z_j^L)\frac{\partial {\cal C}}{\partial (a_j^L)},
\label{_auto9} \tag{14}
\end{equation}
$$
and
<!-- Equation labels as ordinary links -->
<div id="_auto10"></div>
$$
\begin{equation}
\delta_j^L = \frac{\partial {\cal C}}{\partial b_j^L},
\label{_auto10} \tag{15}
\end{equation}
$$
An interesting consequence of the above equations is that when the
activation $a_k^{L-1}$ is small, the gradient term, that is the
derivative of the cost function with respect to the weights, will also
tend to be small. We then say that the weight learns slowly, meaning
that it changes slowly when we minimize the cost function via, say,
gradient descent. In this case we say the system learns slowly.
Another interesting feature arises when the activation function,
represented by the sigmoid function here, is rather flat, as happens when
we move towards its end values $0$ and $1$ (see the Python codes above). In these
cases, the derivatives of the activation function will also be close
to zero, meaning again that the gradients will be small and the
network learns slowly.
We need a fourth equation and we are set. We are going to propagate
backwards in order to determine the weights and biases. In order
to do so we need to represent the error in the layer before the final
one, $L-1$, in terms of the errors in the final output layer.
## Final back propagating equation
We have that (replacing $L$ with a general layer $l$)
$$
\delta_j^l =\frac{\partial {\cal C}}{\partial z_j^l}.
$$
We want to express this in terms of the equations for layer $l+1$. Using the chain rule and summing over all $k$ entries we have
$$
\delta_j^l =\sum_k \frac{\partial {\cal C}}{\partial z_k^{l+1}}\frac{\partial z_k^{l+1}}{\partial z_j^{l}}=\sum_k \delta_k^{l+1}\frac{\partial z_k^{l+1}}{\partial z_j^{l}},
$$
and recalling that
$$
z_j^{l+1} = \sum_{i=1}^{M_{l}}w_{ij}^{l+1}a_i^{l}+b_j^{l+1},
$$
with $M_l$ being the number of nodes in layer $l$, we obtain
$$
\delta_j^l =\sum_k \delta_k^{l+1}w_{jk}^{l+1}f'(z_j^l).
$$
This is our final equation.
We are now ready to set up the algorithm for back propagation and learning the weights and biases.
## Setting up the Back propagation algorithm
The four equations provide us with a way of computing the gradient of the cost function. Let us write this out in the form of an algorithm.
First, we set up the input data $\hat{x}$ and the activations
$\hat{z}^1$ of the input layer and compute the activation function and
the pertinent outputs $\hat{a}^1$.
Secondly, we then perform the feed forward until we reach the output
layer, computing all the activations $\hat{z}^l$ and the
pertinent outputs $\hat{a}^l$ for
$l=2,3,\dots,L$.
Thereafter we compute the output error $\hat{\delta}^L$ by computing all
$$
\delta_j^L = f'(z_j^L)\frac{\partial {\cal C}}{\partial (a_j^L)}.
$$
Then we compute the back propagate error for each $l=L-1,L-2,\dots,2$ as
$$
\delta_j^l = \sum_k \delta_k^{l+1}w_{kj}^{l+1}f'(z_j^l).
$$
Finally, we update the weights and the biases using gradient descent for each $l=L-1,L-2,\dots,2$ according to the rules
$$
w_{jk}^l\leftarrow w_{jk}^l- \eta \delta_j^la_k^{l-1},
$$
$$
b_j^l \leftarrow b_j^l-\eta \frac{\partial {\cal C}}{\partial b_j^l}=b_j^l-\eta \delta_j^l,
$$
The parameter $\eta$ is the learning parameter discussed in connection with the gradient descent methods.
Here it is convenient to use stochastic gradient descent (see the examples below) with mini-batches with an outer loop that steps through multiple epochs of training.
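To make the algorithm concrete, here is a deliberately minimal sketch for a network with one hidden layer, sigmoid activations in both layers, the quadratic cost and plain (full-batch) gradient descent on a made-up toy data set. All sizes, the learning rate and the number of epochs are arbitrary choices for illustration, not values prescribed by the text; a full implementation with minibatches follows later in these notes.

import numpy as np

def sigmoid(z):
    return 1.0/(1.0 + np.exp(-z))

# toy data: 100 two-dimensional inputs and targets in [0, 1]
rng = np.random.default_rng(2)
X = rng.uniform(-1, 1, size=(100, 2))
t = (0.5*(1.0 + X[:, 0]*X[:, 1])).reshape(-1, 1)

n_features, n_hidden, n_output = 2, 10, 1
W1 = rng.normal(scale=0.5, size=(n_features, n_hidden))
b1 = np.zeros(n_hidden)
W2 = rng.normal(scale=0.5, size=(n_hidden, n_output))
b2 = np.zeros(n_output)
eta = 0.5

for epoch in range(2000):
    # feed forward
    z1 = X @ W1 + b1
    a1 = sigmoid(z1)
    z2 = a1 @ W2 + b2
    a2 = sigmoid(z2)                          # output a^L
    # output error: delta^L = f'(z^L) o (a^L - t)
    delta2 = a2*(1 - a2)*(a2 - t)
    # back propagate the error to the hidden layer
    delta1 = (delta2 @ W2.T)*a1*(1 - a1)
    # gradient descent updates, averaged over the data points
    W2 -= eta*(a1.T @ delta2)/len(X)
    b2 -= eta*delta2.mean(axis=0)
    W1 -= eta*(X.T @ delta1)/len(X)
    b1 -= eta*delta1.mean(axis=0)

print("mean squared error after training:", np.mean((a2 - t)**2))

The printed error should decrease compared to the untrained network if the learning rate and number of epochs are reasonable.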
## Setting up a Multi-layer perceptron model for classification
We are now going to develop an example based on the MNIST
database. This is a classification problem and we need to use our
cross-entropy function we discussed in connection with logistic
regression. The cross-entropy defines our cost function for the
classification problems with neural networks.
In binary classification with two classes $(0, 1)$ we define the
logistic/sigmoid function as the probability that a particular input
is in class $0$ or $1$. This is possible because the logistic
function takes any real number as input and outputs a number
between 0 and 1, and can therefore be interpreted as a probability. It
also has other nice properties, such as a derivative that is simple to
calculate.
For an input $\boldsymbol{a}$ from the hidden layer, the probability that the original input $\boldsymbol{x}$
is in class 0 or 1 is given below. We let $\hat{\theta}$ represent the unknown weights and biases to be adjusted by our equations. The variable $\hat{x}$
in the expressions below represents our activation values $z$. We have
$$
P(y = 0 \mid \hat{x}, \hat{\theta}) = \frac{1}{1 + \exp{(-\hat{x})}} ,
$$
and
$$
P(y = 1 \mid \hat{x}, \hat{\theta}) = 1 - P(y = 0 \mid \hat{x}, \hat{\theta}) ,
$$
where $y \in \{0, 1\}$ and $\hat{\theta}$ represents the weights and biases
of our network.
## Defining the cost function
Our cost function is given as (see the Logistic regression lectures)
$$
\mathcal{C}(\hat{\theta}) = - \ln P(\mathcal{D} \mid \hat{\theta}) = - \sum_{i=1}^n\left(
y_i \ln[P(y_i = 0)] + (1 - y_i) \ln [1 - P(y_i = 0)]\right) = \sum_{i=1}^n \mathcal{L}_i(\hat{\theta}) .
$$
This last equality means that we can interpret our *cost* function as a sum over the *loss* function
for each point in the dataset $\mathcal{L}_i(\hat{\theta})$.
The negative sign is just so that we can think about our algorithm as minimizing a positive number, rather
than maximizing a negative number.
In *multiclass* classification it is common to treat each integer label as a so called *one-hot* vector:
$y = 5 \quad \rightarrow \quad \hat{y} = (0, 0, 0, 0, 0, 1, 0, 0, 0, 0) ,$ and
$y = 1 \quad \rightarrow \quad \hat{y} = (0, 1, 0, 0, 0, 0, 0, 0, 0, 0) ,$
i.e. a binary bit string of length $C$, where $C = 10$ is the number of classes in the MNIST dataset (numbers from $0$ to $9$).
If $\hat{x}_i$ is the $i$-th input (image), $y_{ic}$ refers to the $c$-th component of the $i$-th
output vector $\hat{y}_i$.
The probability of $\hat{x}_i$ being in class $c$ will be given by the softmax function:
$$
P(y_{ic} = 1 \mid \hat{x}_i, \hat{\theta}) = \frac{\exp{((\hat{a}_i^{hidden})^T \hat{w}_c)}}
{\sum_{c'=0}^{C-1} \exp{((\hat{a}_i^{hidden})^T \hat{w}_{c'})}} ,
$$
which reduces to the logistic function in the binary case.
The likelihood of this $C$-class classifier
is now given as:
$$
P(\mathcal{D} \mid \hat{\theta}) = \prod_{i=1}^n \prod_{c=0}^{C-1} [P(y_{ic} = 1)]^{y_{ic}} .
$$
Again we take the negative log-likelihood to define our cost function:
$$
\mathcal{C}(\hat{\theta}) = - \log{P(\mathcal{D} \mid \hat{\theta})}.
$$
See the logistic regression lectures for a full definition of the cost function.
The back propagation equations need now only a small change, namely the definition of a new cost function. We are thus ready to use the same equations as before!
## Example: binary classification problem
As an example of the above, relevant for project 2 as well, let us consider a binary classification problem. As discussed in our logistic regression lectures, we defined a cost function in terms of the parameters $\beta$ as
$$
\mathcal{C}(\hat{\beta}) = - \sum_{i=1}^n \left(y_i\log{p(y_i \vert x_i,\hat{\beta})}+(1-y_i)\log{\left(1-p(y_i \vert x_i,\hat{\beta})\right)}\right),
$$
where we had defined the logistic (sigmoid) function
$$
p(y_i =1\vert x_i,\hat{\beta})=\frac{\exp{(\beta_0+\beta_1 x_i)}}{1+\exp{(\beta_0+\beta_1 x_i)}},
$$
and
$$
p(y_i =0\vert x_i,\hat{\beta})=1-p(y_i =1\vert x_i,\hat{\beta}).
$$
The parameters $\hat{\beta}$ were defined using a minimization method like gradient descent or Newton-Raphson's method.
Now we replace $x_i$ with the activation $z_i^l$ for a given layer $l$ and the outputs as $y_i=a_i^l=f(z_i^l)$, with $z_i^l$ now being a function of the weights $w_{ij}^l$ and biases $b_i^l$.
We have then
$$
a_i^l = y_i = \frac{\exp{(z_i^l)}}{1+\exp{(z_i^l)}},
$$
with
$$
z_i^l = \sum_{j}w_{ij}^l a_j^{l-1}+b_i^l,
$$
where the superscript $l-1$ indicates that these are the outputs from layer $l-1$.
Our cost function at the final layer $l=L$ is now
$$
\mathcal{C}(\hat{W}) = - \sum_{i=1}^n \left(t_i\log{a_i^L}+(1-t_i)\log{(1-a_i^L)}\right),
$$
where we have defined the targets $t_i$. The derivatives of the cost function with respect to the output $a_i^L$ are then easily calculated and we get
$$
\frac{\partial \mathcal{C}(\hat{W})}{\partial a_i^L} = \frac{a_i^L-t_i}{a_i^L(1-a_i^L)}.
$$
In case we use another activation function than the logistic one, we need to evaluate other derivatives.
## The Softmax function
In case we employ the more general case given by the Softmax equation, we need to evaluate the derivative of the activation function with respect to the activation $z_i^l$, that is we need
$$
\frac{\partial f(z_i^l)}{\partial w_{jk}^l} =
\frac{\partial f(z_i^l)}{\partial z_j^l} \frac{\partial z_j^l}{\partial w_{jk}^l}= \frac{\partial f(z_i^l)}{\partial z_j^l}a_k^{l-1}.
$$
For the Softmax function we have
$$
f(z_i^l) = \frac{\exp{(z_i^l)}}{\sum_{m=1}^K\exp{(z_m^l)}}.
$$
Its derivative with respect to $z_j^l$ gives
$$
\frac{\partial f(z_i^l)}{\partial z_j^l}= f(z_i^l)\left(\delta_{ij}-f(z_j^l)\right),
$$
which in the case of the simple binary model reduces to having $i=j$.
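The Softmax derivative above is also straightforward to check numerically. The sketch below compares the analytical Jacobian $f(z_i^l)\left(\delta_{ij}-f(z_j^l)\right)$ with finite differences for an arbitrary input vector; it is only a sanity check.

import numpy as np

def softmax(z):
    # subtracting the maximum does not change the result but improves numerical stability
    e = np.exp(z - np.max(z))
    return e/np.sum(e)

z = np.array([0.5, -1.0, 2.0])
f = softmax(z)

# analytical Jacobian: df_i/dz_j = f_i (delta_ij - f_j)
jac_analytic = np.diag(f) - np.outer(f, f)

# finite-difference check
h = 1e-6
jac_numeric = np.zeros((3, 3))
for j in range(3):
    dz = np.zeros(3)
    dz[j] = h
    jac_numeric[:, j] = (softmax(z + dz) - softmax(z - dz))/(2*h)

print(np.max(np.abs(jac_analytic - jac_numeric)))    # should be very small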
## Developing a code for doing neural networks with back propagation
One can identify a set of key steps when using neural networks to solve supervised learning problems:
1. Collect and pre-process data
2. Define model and architecture
3. Choose cost function and optimizer
4. Train the model
5. Evaluate model performance on test data
6. Adjust hyperparameters (if necessary, network architecture)
## Collect and pre-process data
Here we will be using the MNIST dataset, which is readily available through the **scikit-learn**
package. You may also find it for example [here](http://yann.lecun.com/exdb/mnist/).
The *MNIST* (Modified National Institute of Standards and Technology) database is a large database
of handwritten digits that is commonly used for training various image processing systems.
The MNIST dataset consists of 70 000 images of size $28\times 28$ pixels, each labeled from 0 to 9.
The scikit-learn dataset we will use consists of a selection of 1797 images of size $8\times 8$ collected and processed from this database.
To feed data into a feed-forward neural network we need to represent
the inputs as a design/feature matrix $X = (n_{inputs}, n_{features})$. Each
row represents an *input*, in this case a handwritten digit, and
each column represents a *feature*, in this case a pixel. The
correct answers, also known as *labels* or *targets* are
represented as a 1D array of integers
$Y = (n_{inputs}) = (5, 3, 1, 8,...)$.
As an example, say we want to build a neural network using supervised learning to predict Body-Mass Index (BMI) from
measurements of height (in m)
and weight (in kg). If we have measurements of 5 people the design/feature matrix could be for example:
$$ X = \begin{bmatrix}
1.85 & 81\\
1.71 & 65\\
1.95 & 103\\
1.55 & 42\\
1.63 & 56
\end{bmatrix} ,$$
and the targets would be:
$$ Y = (23.7, 22.2, 27.1, 17.5, 21.1) $$
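As a small check of this example, the design matrix and the targets can be set up directly in NumPy; the targets follow from the standard definition BMI = weight/height$^2$.

import numpy as np

# design/feature matrix: one row per person, columns = (height in m, weight in kg)
X = np.array([[1.85, 81],
              [1.71, 65],
              [1.95, 103],
              [1.55, 42],
              [1.63, 56]])

# targets: BMI = weight/height^2, rounded to one decimal
Y = np.round(X[:, 1]/X[:, 0]**2, 1)
print(Y)    # [23.7 22.2 27.1 17.5 21.1]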
Since each input image is a 2D matrix, we need to flatten the image
(i.e. "unravel" the 2D matrix into a 1D array) to turn the data into a
design/feature matrix. This means we lose all spatial information in the
image, such as locality and translational invariance. More complicated
architectures such as Convolutional Neural Networks can take advantage
of such information, and are most commonly applied when analyzing
images.
# import necessary packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
# ensure the same random numbers appear every time
np.random.seed(0)
# display images in notebook
%matplotlib inline
plt.rcParams['figure.figsize'] = (12,12)
# download MNIST dataset
digits = datasets.load_digits()
# define inputs and labels
inputs = digits.images
labels = digits.target
print("inputs = (n_inputs, pixel_width, pixel_height) = " + str(inputs.shape))
print("labels = (n_inputs) = " + str(labels.shape))
# flatten the image
# the value -1 means dimension is inferred from the remaining dimensions: 8x8 = 64
n_inputs = len(inputs)
inputs = inputs.reshape(n_inputs, -1)
print("X = (n_inputs, n_features) = " + str(inputs.shape))
# choose some random images to display
indices = np.arange(n_inputs)
random_indices = np.random.choice(indices, size=5)
for i, image in enumerate(digits.images[random_indices]):
plt.subplot(1, 5, i+1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title("Label: %d" % digits.target[random_indices[i]])
plt.show()
## Train and test datasets
Performing analysis before partitioning the dataset is a major error that can lead to incorrect conclusions.
We will reserve $80 \%$ of our dataset for training and $20 \%$ for testing.
It is important that the train and test datasets are drawn randomly from our dataset, to ensure
no bias in the sampling.
Say you are taking measurements of weather data to predict the weather in the coming 5 days.
You don't want to train your model on measurements taken from the hours 00.00 to 12.00, and then test it on data
collected from 12.00 to 24.00.
from sklearn.model_selection import train_test_split
# one-liner from scikit-learn library
train_size = 0.8
test_size = 1 - train_size
X_train, X_test, Y_train, Y_test = train_test_split(inputs, labels, train_size=train_size,
test_size=test_size)
# equivalently in numpy
def train_test_split_numpy(inputs, labels, train_size, test_size):
    n_inputs = len(inputs)

    # shuffle inputs and labels with the same permutation so they stay paired
    shuffled_indices = np.random.permutation(n_inputs)
    inputs_shuffled = inputs[shuffled_indices]
    labels_shuffled = labels[shuffled_indices]

    train_end = int(n_inputs*train_size)
    X_train, X_test = inputs_shuffled[:train_end], inputs_shuffled[train_end:]
    Y_train, Y_test = labels_shuffled[:train_end], labels_shuffled[train_end:]

    return X_train, X_test, Y_train, Y_test
#X_train, X_test, Y_train, Y_test = train_test_split_numpy(inputs, labels, train_size, test_size)
print("Number of training images: " + str(len(X_train)))
print("Number of test images: " + str(len(X_test)))
## Define model and architecture
Our simple feed-forward neural network will consist of an *input* layer, a single *hidden* layer and an *output* layer. The activation $y$ of each neuron is a weighted sum of inputs, passed through an activation function. In case of the simple perceptron model we have
$$ z = \sum_{i=1}^n w_i a_i ,$$
$$ y = f(z) ,$$
where $f$ is the activation function, $a_i$ represents input from neuron $i$ in the preceding layer
and $w_i$ is the weight to input $i$.
The activation of the neurons in the input layer is just the features (e.g. a pixel value).
The simplest activation function for a neuron is the *Heaviside* function:
$$ f(z) =
\begin{cases}
1, & z > 0\\
0, & \text{otherwise}
\end{cases}
$$
A feed-forward neural network with this activation is known as a *perceptron*.
For a binary classifier (i.e. two classes, 0 or 1, dog or not-dog) we can also use this in our output layer.
This activation can be generalized to $k$ classes (using e.g. the *one-against-all* strategy),
and we call these architectures *multiclass perceptrons*.
However, it is now common to use the terms Single Layer Perceptron (SLP) (1 hidden layer) and
Multilayer Perceptron (MLP) (2 or more hidden layers) to refer to feed-forward neural networks with any activation function.
Typical choices for activation functions include the sigmoid function, hyperbolic tangent, and Rectified Linear Unit (ReLU).
We will be using the sigmoid function $\sigma(x)$:
$$ f(x) = \sigma(x) = \frac{1}{1 + e^{-x}} ,$$
which is inspired by probability theory (see logistic regression) and was most commonly used until about 2011. See the discussion below concerning other activation functions.
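For reference, the small sketch below simply plots the activation functions mentioned above (Heaviside, sigmoid, $\tanh$ and ReLU) side by side; it is only meant as a visual comparison.

import numpy as np
import matplotlib.pyplot as plt

z = np.linspace(-5, 5, 201)

step = np.where(z > 0, 1.0, 0.0)         # Heaviside (perceptron) activation
logistic = 1.0/(1.0 + np.exp(-z))        # sigmoid
hyperbolic = np.tanh(z)                  # hyperbolic tangent
relu = np.maximum(0.0, z)                # rectified linear unit

for curve, label in zip((step, logistic, hyperbolic, relu),
                        ("Heaviside", "sigmoid", "tanh", "ReLU")):
    plt.plot(z, curve, label=label)
plt.xlabel("z")
plt.ylabel("f(z)")
plt.legend()
plt.show()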
## Layers
* Input
Since each input image has 8x8 = 64 pixels or features, we have an input layer of 64 neurons.
* Hidden layer
We will use 50 neurons in the hidden layer receiving input from the neurons in the input layer.
Since each neuron in the hidden layer is connected to the 64 inputs we have 64x50 = 3200 weights to the hidden layer.
* Output
If we were building a binary classifier, it would be sufficient with a single neuron in the output layer,
which could output 0 or 1 according to the Heaviside function. This would be an example of a *hard* classifier, meaning it outputs the class of the input directly. However, if we are dealing with noisy data it is often beneficial to use a *soft* classifier, which outputs the probability of being in class 0 or 1.
For a soft binary classifier, we could use a single neuron and interpret the output as either being the probability of being in class 0 or the probability of being in class 1. Alternatively we could use 2 neurons, and interpret each neuron as the probability of being in each class.
Since we are doing multiclass classification, with 10 categories, it is natural to use 10 neurons in the output layer. We number the neurons $j = 0,1,...,9$. The activation of each output neuron $j$ will be according to the *softmax* function:
$$ P(\text{class $j$} \mid \text{input $\hat{a}$}) = \frac{\exp{(\hat{a}^T \hat{w}_j)}}
{\sum_{c=0}^{9} \exp{(\hat{a}^T \hat{w}_c)}} ,$$
i.e. each neuron $j$ outputs the probability of being in class $j$ given an input from the hidden layer $\hat{a}$, with $\hat{w}_j$ the weights of neuron $j$ to the inputs.
The denominator is a normalization factor to ensure the outputs (probabilities) sum up to 1.
The exponent is just the weighted sum of inputs as before:
$$ z_j = \sum_{i=1}^n w_{ij} a_i+b_j.$$
Since each neuron in the output layer is connected to the 50 inputs from the hidden layer we have 50x10 = 500
weights to the output layer.
## Weights and biases
Typically weights are initialized with small values distributed around zero, drawn from a uniform
or normal distribution. Setting all weights to zero means all neurons give the same output, making the network useless.
Adding a bias value to the weighted sum of inputs allows the neural network to represent a greater range
of values. Without it, any input with the value 0 will be mapped to zero (before being passed through the activation). The bias unit has an output of 1, and a weight to each neuron $j$, $b_j$:
$$ z_j = \sum_{i=1}^n w_{ij} a_i + b_j.$$
The bias weights $\hat{b}$ are often initialized to zero, but a small value like $0.01$ ensures all neurons have some output which can be backpropagated in the first training cycle.
# building our neural network
n_inputs, n_features = X_train.shape
n_hidden_neurons = 50
n_categories = 10
# we make the weights normally distributed using numpy.random.randn
# weights and bias in the hidden layer
hidden_weights = np.random.randn(n_features, n_hidden_neurons)
hidden_bias = np.zeros(n_hidden_neurons) + 0.01
# weights and bias in the output layer
output_weights = np.random.randn(n_hidden_neurons, n_categories)
output_bias = np.zeros(n_categories) + 0.01
## Feed-forward pass
Denote $F$ the number of features, $H$ the number of hidden neurons and $C$ the number of categories.
For each input image we calculate a weighted sum of input features (pixel values) to each neuron $j$ in the hidden layer $l$:
$$ z_{j}^{l} = \sum_{i=1}^{F} w_{ij}^{l} x_i + b_{j}^{l},$$
this is then passed through our activation function
$$ a_{j}^{l} = f(z_{j}^{l}) .$$
We calculate a weighted sum of inputs (activations in the hidden layer) to each neuron $j$ in the output layer:
$$ z_{j}^{L} = \sum_{i=1}^{H} w_{ij}^{L} a_{i}^{l} + b_{j}^{L}.$$
Finally we calculate the output of neuron $j$ in the output layer using the softmax function:
$$ a_{j}^{L} = \frac{\exp{(z_j^{L})}}
{\sum_{c=0}^{C-1} \exp{(z_c^{L})}} .$$
## Matrix multiplications
Since our data has the dimensions $X = (n_{inputs}, n_{features})$ and our weights to the hidden
layer have the dimensions
$W_{hidden} = (n_{features}, n_{hidden})$,
we can easily feed the network all our training data in one go by taking the matrix product
$$ X W^{h} = (n_{inputs}, n_{hidden}),$$
and obtain a matrix that holds the weighted sum of inputs to the hidden layer
for each input image and each hidden neuron.
We also add the bias to obtain a matrix of weighted sums to the hidden layer $Z^{h}$:
$$ \hat{z}^{l} = \hat{X} \hat{W}^{l} + \hat{b}^{l} ,$$
meaning the same bias (1D array with size equal number of hidden neurons) is added to each input image.
This is then passed through the activation:
$$ \hat{a}^{l} = f(\hat{z}^l) .$$
This is fed to the output layer:
$$ \hat{z}^{L} = \hat{a}^{l} \hat{W}^{L} + \hat{b}^{L} .$$
Finally we receive our output values for each image and each category by passing it through the softmax function:
$$ output = softmax (\hat{z}^{L}) = (n_{inputs}, n_{categories}) .$$
# setup the feed-forward pass, subscript h = hidden layer
def sigmoid(x):
return 1/(1 + np.exp(-x))
def feed_forward(X):
# weighted sum of inputs to the hidden layer
z_h = np.matmul(X, hidden_weights) + hidden_bias
# activation in the hidden layer
a_h = sigmoid(z_h)
# weighted sum of inputs to the output layer
z_o = np.matmul(a_h, output_weights) + output_bias
# softmax output
# axis 0 holds each input and axis 1 the probabilities of each category
exp_term = np.exp(z_o)
probabilities = exp_term / np.sum(exp_term, axis=1, keepdims=True)
return probabilities
probabilities = feed_forward(X_train)
print("probabilities = (n_inputs, n_categories) = " + str(probabilities.shape))
print("probability that image 0 is in category 0,1,2,...,9 = \n" + str(probabilities[0]))
print("probabilities sum up to: " + str(probabilities[0].sum()))
print()
# we obtain a prediction by taking the class with the highest likelihood
def predict(X):
probabilities = feed_forward(X)
return np.argmax(probabilities, axis=1)
predictions = predict(X_train)
print("predictions = (n_inputs) = " + str(predictions.shape))
print("prediction for image 0: " + str(predictions[0]))
print("correct label for image 0: " + str(Y_train[0]))
## Choose cost function and optimizer
To measure how well our neural network is doing we need to introduce a cost function.
We will call the function that gives the error of a single sample output the *loss* function, and the function
that gives the total error of our network across all samples the *cost* function.
A typical choice for multiclass classification is the *cross-entropy* loss, also known as the negative log likelihood.
In *multiclass* classification it is common to treat each integer label as a so called *one-hot* vector:
$$ y = 5 \quad \rightarrow \quad \hat{y} = (0, 0, 0, 0, 0, 1, 0, 0, 0, 0) ,$$
$$ y = 1 \quad \rightarrow \quad \hat{y} = (0, 1, 0, 0, 0, 0, 0, 0, 0, 0) ,$$
i.e. a binary bit string of length $C$, where $C = 10$ is the number of classes in the MNIST dataset.
Let $y_{ic}$ denote the $c$-th component of the $i$-th one-hot vector.
We define the cost function $\mathcal{C}$ as a sum over the cross-entropy loss for each point $\hat{x}_i$ in the dataset.
In the one-hot representation only one of the terms in the loss function is non-zero, namely the
probability of the correct category $c'$
(i.e. the category $c'$ such that $y_{ic'} = 1$). This means that the cross entropy loss only punishes you for how wrong
you got the correct label. The probability of category $c$ is given by the softmax function. The vector $\hat{\theta}$ represents the parameters of our network, i.e. all the weights and biases.
## Optimizing the cost function
The network is trained by finding the weights and biases that minimize the cost function. One of the most widely used classes of methods is *gradient descent* and its generalizations. The idea behind gradient descent
is simply to adjust the weights in the direction of the negative gradient of the cost function, so that we move toward a *local* minimum of the cost function.
Each parameter $\theta$ is iteratively adjusted according to the rule
$$ \theta_{i+1} = \theta_i - \eta \nabla \mathcal{C}(\theta_i) ,$$
where $\eta$ is known as the *learning rate*, which controls how big a step we take towards the minimum.
This update can be repeated for any number of iterations, or until we are satisfied with the result.
A simple and effective improvement is a variant usually called *mini-batch* (or stochastic) *gradient descent*.
Instead of calculating the gradient on the whole dataset, we calculate an approximation of the gradient
on a subset of the data called a *minibatch*.
If there are $N$ data points and we have a minibatch size of $M$, the total number of batches
is $N/M$.
We denote each minibatch $B_k$, with $k = 1, 2,...,N/M$. The gradient then becomes:
$$ \nabla \mathcal{C}(\theta) = \frac{1}{N} \sum_{i=1}^N \nabla \mathcal{L}_i(\theta) \quad \rightarrow \quad
\frac{1}{M} \sum_{i \in B_k} \nabla \mathcal{L}_i(\theta) ,$$
i.e. instead of averaging the loss over the entire dataset, we average over a minibatch.
This has two important benefits:
1. Introducing stochasticity decreases the chance that the algorithm becomes stuck in a local minimum.
2. It significantly speeds up the calculation, since we do not have to use the entire dataset to calculate the gradient.
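A minimal sketch of the minibatch update described above is shown below, using an arbitrary linear toy model (not our neural network) just to illustrate how the indices are split into minibatches and how the averaged gradient enters the update rule.

import numpy as np

# toy problem: linear model y = X theta with squared loss, trained with minibatches
rng = np.random.default_rng(3)
N, M, eta = 1000, 100, 0.1
X = rng.normal(size=(N, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1*rng.normal(size=N)
theta = np.zeros(3)

for epoch in range(20):
    # shuffle the data indices and split them into N/M minibatches B_k
    for B_k in np.array_split(rng.permutation(N), N//M):
        # gradient of the average loss over the minibatch B_k
        grad = (2.0/M)*X[B_k].T @ (X[B_k] @ theta - y[B_k])
        theta -= eta*grad

print(theta)    # should be close to [1.0, -2.0, 0.5]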
The various optimization methods, with codes and algorithms, are discussed in our lectures on [Gradient descent approaches](https://compphysics.github.io/MachineLearning/doc/pub/Splines/html/Splines-bs.html).
## Regularization
It is common to add an extra term to the cost function, proportional
to the size of the weights. This is equivalent to constraining the
size of the weights, so that they do not grow out of control.
Constraining the size of the weights means that the weights cannot
grow arbitrarily large to fit the training data, and in this way
reduces *overfitting*.
We will measure the size of the weights using the so called *L2-norm*, meaning our cost function becomes:
$$ \mathcal{C}(\theta) = \frac{1}{N} \sum_{i=1}^N \mathcal{L}_i(\theta) \quad \rightarrow \quad
\frac{1}{N} \sum_{i=1}^N \mathcal{L}_i(\theta) + \lambda \lvert \lvert \hat{w} \rvert \rvert_2^2
= \frac{1}{N} \sum_{i=1}^N \mathcal{L}_i(\theta) + \lambda \sum_{ij} w_{ij}^2,$$
i.e. we sum up all the weights squared. The factor $\lambda$ is known as a regularization parameter.
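In code, the $L_2$ term simply adds $\lambda \sum_{ij} w_{ij}^2$ to the cost and a term proportional to the weights to the gradient; a tiny sketch with a hypothetical weight matrix is shown below. The factor of 2 from the derivative is often absorbed into $\lambda$, which is what the training loop further down does.

import numpy as np

# hypothetical weight matrix and regularization parameter
rng = np.random.default_rng(4)
W = rng.normal(size=(64, 50))
lmbd = 0.01

l2_penalty = lmbd*np.sum(W**2)      # lambda * sum_ij w_ij^2, added to the cost
l2_gradient = 2*lmbd*W              # its contribution to dC/dW
print(l2_penalty, l2_gradient.shape)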
In order to train the model, we need to calculate the derivative of
the cost function with respect to every bias and weight in the
network. In total our network has $(64 + 1)\times 50=3250$ weights in
the hidden layer and $(50 + 1)\times 10=510$ weights to the output
layer ($+1$ for the bias), and the gradient must be calculated for
every parameter. We use the *backpropagation* algorithm discussed
above. This is a clever use of the chain rule that allows us to
calculate the gradient efficiently.
## Matrix multiplication
To train our network more efficiently, these equations are implemented using matrix operations.
The error in the output layer is calculated simply as, with $\hat{t}$ being our targets,
$$ \delta_L = \hat{y} - \hat{t} = (n_{inputs}, n_{categories}) .$$
The gradient for the output weights is calculated as
$$ \nabla W_{L} = \hat{a}^T \delta_L = (n_{hidden}, n_{categories}) ,$$
where $\hat{a} = (n_{inputs}, n_{hidden})$. This simply means that we are summing up the gradients for each input.
Since we are going backwards we have to transpose the activation matrix.
The gradient with respect to the output bias is then
$$ \nabla \hat{b}_{L} = \sum_{i=1}^{n_{inputs}} \delta_L = (n_{categories}) .$$
The error in the hidden layer is
$$ \delta_h = \delta_L W_{L}^T \circ f'(z_{h}) = \delta_L W_{L}^T \circ a_{h} \circ (1 - a_{h}) = (n_{inputs}, n_{hidden}) ,$$
where $f'(z_{h})$ is the derivative of the activation in the hidden layer. The matrix products mean
that we are summing up the products for each neuron in the output layer. The symbol $\circ$ denotes
the *Hadamard product*, meaning element-wise multiplication.
This again gives us the gradients in the hidden layer:
$$ \nabla W_{h} = X^T \delta_h = (n_{features}, n_{hidden}) ,$$
$$ \nabla b_{h} = \sum_{i=1}^{n_{inputs}} \delta_h = (n_{hidden}) .$$
# to categorical turns our integer vector into a onehot representation
from sklearn.metrics import accuracy_score
# one-hot in numpy
def to_categorical_numpy(integer_vector):
n_inputs = len(integer_vector)
n_categories = np.max(integer_vector) + 1
onehot_vector = np.zeros((n_inputs, n_categories))
onehot_vector[range(n_inputs), integer_vector] = 1
return onehot_vector
#Y_train_onehot, Y_test_onehot = to_categorical(Y_train), to_categorical(Y_test)
Y_train_onehot, Y_test_onehot = to_categorical_numpy(Y_train), to_categorical_numpy(Y_test)
def feed_forward_train(X):
# weighted sum of inputs to the hidden layer
z_h = np.matmul(X, hidden_weights) + hidden_bias
# activation in the hidden layer
a_h = sigmoid(z_h)
# weighted sum of inputs to the output layer
z_o = np.matmul(a_h, output_weights) + output_bias
# softmax output
# axis 0 holds each input and axis 1 the probabilities of each category
exp_term = np.exp(z_o)
probabilities = exp_term / np.sum(exp_term, axis=1, keepdims=True)
# for backpropagation need activations in hidden and output layers
return a_h, probabilities
def backpropagation(X, Y):
a_h, probabilities = feed_forward_train(X)
# error in the output layer
error_output = probabilities - Y
# error in the hidden layer
error_hidden = np.matmul(error_output, output_weights.T) * a_h * (1 - a_h)
# gradients for the output layer
output_weights_gradient = np.matmul(a_h.T, error_output)
output_bias_gradient = np.sum(error_output, axis=0)
# gradient for the hidden layer
hidden_weights_gradient = np.matmul(X.T, error_hidden)
hidden_bias_gradient = np.sum(error_hidden, axis=0)
return output_weights_gradient, output_bias_gradient, hidden_weights_gradient, hidden_bias_gradient
print("Old accuracy on training data: " + str(accuracy_score(predict(X_train), Y_train)))
eta = 0.01
lmbd = 0.01
for i in range(1000):
# calculate gradients
dWo, dBo, dWh, dBh = backpropagation(X_train, Y_train_onehot)
# regularization term gradients
dWo += lmbd * output_weights
dWh += lmbd * hidden_weights
# update weights and biases
output_weights -= eta * dWo
output_bias -= eta * dBo
hidden_weights -= eta * dWh
hidden_bias -= eta * dBh
print("New accuracy on training data: " + str(accuracy_score(predict(X_train), Y_train)))
## Improving performance
As we can see the network does not seem to be learning at all. It seems to be just guessing the label for each image.
In order to obtain a network that does something useful, we will have to do a bit more work.
The choice of *hyperparameters* such as learning rate and regularization parameter is hugely influential for the performance of the network. Typically a *grid-search* is performed, wherein we test different hyperparameters separated by orders of magnitude. For example we could test the learning rates $\eta = 10^{-6}, 10^{-5},...,10^{-1}$ with different regularization parameters $\lambda = 10^{-6},...,10^{-0}$.
Next, we haven't implemented minibatching yet, which introduces stochasticity and is thought to act as an important regularizer on the weights. We call a feed-forward + backward pass with a minibatch an *iteration*, and a full training period
going through the entire dataset ($n/M$ batches) an *epoch*.
If this does not improve network performance, you may want to consider altering the network architecture, adding more neurons or hidden layers.
Andrew Ng goes through some of these considerations in this [video](https://youtu.be/F1ka6a13S9I). You can find a summary of the video [here](https://kevinzakka.github.io/2016/09/26/applying-deep-learning/).
## Full object-oriented implementation
It is very natural to think of the network as an object, with specific instances of the network
being realizations of this object with different hyperparameters. An implementation using Python classes provides a clean structure and interface, and the full implementation of our neural network is given below.
class NeuralNetwork:
def __init__(
self,
X_data,
Y_data,
n_hidden_neurons=50,
n_categories=10,
epochs=10,
batch_size=100,
eta=0.1,
lmbd=0.0):
self.X_data_full = X_data
self.Y_data_full = Y_data
self.n_inputs = X_data.shape[0]
self.n_features = X_data.shape[1]
self.n_hidden_neurons = n_hidden_neurons
self.n_categories = n_categories
self.epochs = epochs
self.batch_size = batch_size
self.iterations = self.n_inputs // self.batch_size
self.eta = eta
self.lmbd = lmbd
self.create_biases_and_weights()
def create_biases_and_weights(self):
self.hidden_weights = np.random.randn(self.n_features, self.n_hidden_neurons)
self.hidden_bias = np.zeros(self.n_hidden_neurons) + 0.01
self.output_weights = np.random.randn(self.n_hidden_neurons, self.n_categories)
self.output_bias = np.zeros(self.n_categories) + 0.01
def feed_forward(self):
# feed-forward for training
self.z_h = np.matmul(self.X_data, self.hidden_weights) + self.hidden_bias
self.a_h = sigmoid(self.z_h)
self.z_o = np.matmul(self.a_h, self.output_weights) + self.output_bias
exp_term = np.exp(self.z_o)
self.probabilities = exp_term / np.sum(exp_term, axis=1, keepdims=True)
def feed_forward_out(self, X):
# feed-forward for output
z_h = np.matmul(X, self.hidden_weights) + self.hidden_bias
a_h = sigmoid(z_h)
z_o = np.matmul(a_h, self.output_weights) + self.output_bias
exp_term = np.exp(z_o)
probabilities = exp_term / np.sum(exp_term, axis=1, keepdims=True)
return probabilities
def backpropagation(self):
error_output = self.probabilities - self.Y_data
error_hidden = np.matmul(error_output, self.output_weights.T) * self.a_h * (1 - self.a_h)
self.output_weights_gradient = np.matmul(self.a_h.T, error_output)
self.output_bias_gradient = np.sum(error_output, axis=0)
self.hidden_weights_gradient = np.matmul(self.X_data.T, error_hidden)
self.hidden_bias_gradient = np.sum(error_hidden, axis=0)
if self.lmbd > 0.0:
self.output_weights_gradient += self.lmbd * self.output_weights
self.hidden_weights_gradient += self.lmbd * self.hidden_weights
self.output_weights -= self.eta * self.output_weights_gradient
self.output_bias -= self.eta * self.output_bias_gradient
self.hidden_weights -= self.eta * self.hidden_weights_gradient
self.hidden_bias -= self.eta * self.hidden_bias_gradient
def predict(self, X):
probabilities = self.feed_forward_out(X)
return np.argmax(probabilities, axis=1)
def predict_probabilities(self, X):
probabilities = self.feed_forward_out(X)
return probabilities
def train(self):
data_indices = np.arange(self.n_inputs)
for i in range(self.epochs):
for j in range(self.iterations):
# pick datapoints with replacement
chosen_datapoints = np.random.choice(
data_indices, size=self.batch_size, replace=False
)
# minibatch training data
self.X_data = self.X_data_full[chosen_datapoints]
self.Y_data = self.Y_data_full[chosen_datapoints]
self.feed_forward()
self.backpropagation()
## Evaluate model performance on test data
To measure the performance of our network we evaluate how well it does on data it has never seen before, i.e. the test data.
We measure the performance of the network using the *accuracy* score.
The accuracy is as you would expect just the number of images correctly labeled divided by the total number of images. A perfect classifier will have an accuracy score of $1$.
$$ \text{Accuracy} = \frac{\sum_{i=1}^n I(\hat{y}_i = y_i)}{n} ,$$
where $I$ is the indicator function, $1$ if $\hat{y}_i = y_i$ and $0$ otherwise.
epochs = 100
batch_size = 100
dnn = NeuralNetwork(X_train, Y_train_onehot, eta=eta, lmbd=lmbd, epochs=epochs, batch_size=batch_size,
n_hidden_neurons=n_hidden_neurons, n_categories=n_categories)
dnn.train()
test_predict = dnn.predict(X_test)
# accuracy score from scikit library
print("Accuracy score on test set: ", accuracy_score(Y_test, test_predict))
# equivalent in numpy
def accuracy_score_numpy(Y_test, Y_pred):
return np.sum(Y_test == Y_pred) / len(Y_test)
#print("Accuracy score on test set: ", accuracy_score_numpy(Y_test, test_predict))
## Adjust hyperparameters
We now perform a grid search to find the optimal hyperparameters for the network.
Note that we are only using 1 layer with 50 neurons, and human performance is estimated to be around $98\%$ ($2\%$ error rate).
eta_vals = np.logspace(-5, 1, 7)
lmbd_vals = np.logspace(-5, 1, 7)
# store the models for later use
DNN_numpy = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)
# grid search
for i, eta in enumerate(eta_vals):
for j, lmbd in enumerate(lmbd_vals):
dnn = NeuralNetwork(X_train, Y_train_onehot, eta=eta, lmbd=lmbd, epochs=epochs, batch_size=batch_size,
n_hidden_neurons=n_hidden_neurons, n_categories=n_categories)
dnn.train()
DNN_numpy[i][j] = dnn
test_predict = dnn.predict(X_test)
print("Learning rate = ", eta)
print("Lambda = ", lmbd)
print("Accuracy score on test set: ", accuracy_score(Y_test, test_predict))
print()
## Visualization
# visual representation of grid search
# uses seaborn heatmap, you can also do this with matplotlib imshow
import seaborn as sns
sns.set()
train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
test_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
for i in range(len(eta_vals)):
for j in range(len(lmbd_vals)):
dnn = DNN_numpy[i][j]
train_pred = dnn.predict(X_train)
test_pred = dnn.predict(X_test)
train_accuracy[i][j] = accuracy_score(Y_train, train_pred)
test_accuracy[i][j] = accuracy_score(Y_test, test_pred)
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Training Accuracy")
ax.set_ylabel("$\eta$")
ax.set_xlabel("$\lambda$")
plt.show()
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(test_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Test Accuracy")
ax.set_ylabel("$\eta$")
ax.set_xlabel("$\lambda$")
plt.show()
## scikit-learn implementation
**scikit-learn** focuses more
on traditional machine learning methods, such as regression,
clustering, decision trees, etc. As such, it has only two types of
neural networks: Multi Layer Perceptron outputting continuous values,
*MLPRegressor*, and Multi Layer Perceptron outputting labels,
*MLPClassifier*. We will see how simple it is to use these classes.
**scikit-learn** implements a few improvements over our neural network,
such as early stopping, a varying learning rate, different
optimization methods, etc. We would therefore expect a better
performance overall.
from sklearn.neural_network import MLPClassifier
# store models for later use
DNN_scikit = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)
for i, eta in enumerate(eta_vals):
for j, lmbd in enumerate(lmbd_vals):
dnn = MLPClassifier(hidden_layer_sizes=(n_hidden_neurons), activation='logistic',
alpha=lmbd, learning_rate_init=eta, max_iter=epochs)
dnn.fit(X_train, Y_train)
DNN_scikit[i][j] = dnn
print("Learning rate = ", eta)
print("Lambda = ", lmbd)
print("Accuracy score on test set: ", dnn.score(X_test, Y_test))
print()
## Visualization
# optional
# visual representation of grid search
# uses seaborn heatmap, could probably do this in matplotlib
import seaborn as sns
sns.set()
train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
test_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
for i in range(len(eta_vals)):
for j in range(len(lmbd_vals)):
dnn = DNN_scikit[i][j]
train_pred = dnn.predict(X_train)
test_pred = dnn.predict(X_test)
train_accuracy[i][j] = accuracy_score(Y_train, train_pred)
test_accuracy[i][j] = accuracy_score(Y_test, test_pred)
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Training Accuracy")
ax.set_ylabel("$\eta$")
ax.set_xlabel("$\lambda$")
plt.show()
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(test_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Test Accuracy")
ax.set_ylabel("$\eta$")
ax.set_xlabel("$\lambda$")
plt.show()
## Building neural networks in Tensorflow and Keras
Now we want to build on the experience gained from our neural network implementation in NumPy and scikit-learn
and use it to construct a neural network in Tensorflow. Once we have constructed a neural network in NumPy
and Tensorflow, building one in Keras is really quite trivial, though the performance may suffer.
In our previous example we used only one hidden layer, and in this we will use two. From this it should be quite
clear how to build one using an arbitrary number of hidden layers, using data structures such as Python lists or
NumPy arrays.
## Tensorflow
Tensorflow is an open source machine learning library
developed by the Google Brain team, originally for internal use. It was released
under the Apache 2.0 open source license on November 9, 2015.
Tensorflow is a computational framework that allows you to construct
machine learning models at different levels of abstraction, from
high-level, object-oriented APIs like Keras, down to the C++ kernels
that Tensorflow is built upon. The higher levels of abstraction are
simpler to use, but less flexible, and our choice of implementation
should reflect the problems we are trying to solve.
[Tensorflow uses](https://www.tensorflow.org/guide/graphs) so-called graphs to represent your computation
in terms of the dependencies between individual operations, such that you first build a Tensorflow *graph*
to represent your model, and then create a Tensorflow *session* to run the graph.
In this guide we will analyze the same data as we did in our NumPy and
scikit-learn tutorial, gathered from the MNIST database of images. We
will give an introduction to the lower level Python Application
Program Interfaces (APIs), and see how we use them to build our graph.
Then we will build (effectively) the same graph in Keras, to see just
how simple solving a machine learning problem can be.
To install tensorflow on Unix/Linux systems, use pip as
pip3 install tensorflow
and/or if you use **anaconda**, just write (or install from the graphical user interface)
conda install tensorflow
## Collect and pre-process data
# import necessary packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
# ensure the same random numbers appear every time
np.random.seed(0)
# display images in notebook
%matplotlib inline
plt.rcParams['figure.figsize'] = (12,12)
# download MNIST dataset
digits = datasets.load_digits()
# define inputs and labels
inputs = digits.images
labels = digits.target
print("inputs = (n_inputs, pixel_width, pixel_height) = " + str(inputs.shape))
print("labels = (n_inputs) = " + str(labels.shape))
# flatten the image
# the value -1 means dimension is inferred from the remaining dimensions: 8x8 = 64
n_inputs = len(inputs)
inputs = inputs.reshape(n_inputs, -1)
print("X = (n_inputs, n_features) = " + str(inputs.shape))
# choose some random images to display
indices = np.arange(n_inputs)
random_indices = np.random.choice(indices, size=5)
for i, image in enumerate(digits.images[random_indices]):
plt.subplot(1, 5, i+1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title("Label: %d" % digits.target[random_indices[i]])
plt.show()
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
# one-hot representation of labels
labels = to_categorical(labels)
# split into train and test data
train_size = 0.8
test_size = 1 - train_size
X_train, X_test, Y_train, Y_test = train_test_split(inputs, labels, train_size=train_size,
test_size=test_size)
## Using TensorFlow backend
1. Define model and architecture
2. Choose cost function and optimizer
import tensorflow as tf
class NeuralNetworkTensorflow:
def __init__(
self,
X_train,
Y_train,
X_test,
Y_test,
n_neurons_layer1=100,
n_neurons_layer2=50,
n_categories=2,
epochs=10,
batch_size=100,
eta=0.1,
lmbd=0.0):
# keep track of number of steps
self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
self.X_train = X_train
self.Y_train = Y_train
self.X_test = X_test
self.Y_test = Y_test
self.n_inputs = X_train.shape[0]
self.n_features = X_train.shape[1]
self.n_neurons_layer1 = n_neurons_layer1
self.n_neurons_layer2 = n_neurons_layer2
self.n_categories = n_categories
self.epochs = epochs
self.batch_size = batch_size
self.iterations = self.n_inputs // self.batch_size
self.eta = eta
self.lmbd = lmbd
# build network piece by piece
# name scopes (with) are used to enforce creation of new variables
# https://www.tensorflow.org/guide/variables
self.create_placeholders()
self.create_DNN()
self.create_loss()
self.create_optimiser()
self.create_accuracy()
def create_placeholders(self):
# placeholders are fine here, but "Datasets" are the preferred method
# of streaming data into a model
with tf.name_scope('data'):
self.X = tf.placeholder(tf.float32, shape=(None, self.n_features), name='X_data')
self.Y = tf.placeholder(tf.float32, shape=(None, self.n_categories), name='Y_data')
def create_DNN(self):
with tf.name_scope('DNN'):
# the weights are stored to calculate regularization loss later
# Fully connected layer 1
self.W_fc1 = self.weight_variable([self.n_features, self.n_neurons_layer1], name='fc1', dtype=tf.float32)
b_fc1 = self.bias_variable([self.n_neurons_layer1], name='fc1', dtype=tf.float32)
a_fc1 = tf.nn.sigmoid(tf.matmul(self.X, self.W_fc1) + b_fc1)
# Fully connected layer 2
self.W_fc2 = self.weight_variable([self.n_neurons_layer1, self.n_neurons_layer2], name='fc2', dtype=tf.float32)
b_fc2 = self.bias_variable([self.n_neurons_layer2], name='fc2', dtype=tf.float32)
a_fc2 = tf.nn.sigmoid(tf.matmul(a_fc1, self.W_fc2) + b_fc2)
# Output layer
self.W_out = self.weight_variable([self.n_neurons_layer2, self.n_categories], name='out', dtype=tf.float32)
b_out = self.bias_variable([self.n_categories], name='out', dtype=tf.float32)
self.z_out = tf.matmul(a_fc2, self.W_out) + b_out
def create_loss(self):
with tf.name_scope('loss'):
softmax_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.Y, logits=self.z_out))
regularizer_loss_fc1 = tf.nn.l2_loss(self.W_fc1)
regularizer_loss_fc2 = tf.nn.l2_loss(self.W_fc2)
regularizer_loss_out = tf.nn.l2_loss(self.W_out)
regularizer_loss = self.lmbd*(regularizer_loss_fc1 + regularizer_loss_fc2 + regularizer_loss_out)
self.loss = softmax_loss + regularizer_loss
def create_accuracy(self):
with tf.name_scope('accuracy'):
probabilities = tf.nn.softmax(self.z_out)
predictions = tf.argmax(probabilities, axis=1)
labels = tf.argmax(self.Y, axis=1)
correct_predictions = tf.equal(predictions, labels)
correct_predictions = tf.cast(correct_predictions, tf.float32)
self.accuracy = tf.reduce_mean(correct_predictions)
def create_optimiser(self):
with tf.name_scope('optimizer'):
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.eta).minimize(self.loss, global_step=self.global_step)
def weight_variable(self, shape, name='', dtype=tf.float32):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=name, dtype=dtype)
def bias_variable(self, shape, name='', dtype=tf.float32):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=name, dtype=dtype)
def fit(self):
data_indices = np.arange(self.n_inputs)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(self.epochs):
for j in range(self.iterations):
chosen_datapoints = np.random.choice(data_indices, size=self.batch_size, replace=False)
batch_X, batch_Y = self.X_train[chosen_datapoints], self.Y_train[chosen_datapoints]
sess.run([DNN.loss, DNN.optimizer],
feed_dict={DNN.X: batch_X,
DNN.Y: batch_Y})
accuracy = sess.run(DNN.accuracy,
feed_dict={DNN.X: batch_X,
DNN.Y: batch_Y})
step = sess.run(DNN.global_step)
self.train_loss, self.train_accuracy = sess.run([DNN.loss, DNN.accuracy],
feed_dict={DNN.X: self.X_train,
DNN.Y: self.Y_train})
self.test_loss, self.test_accuracy = sess.run([DNN.loss, DNN.accuracy],
feed_dict={DNN.X: self.X_test,
DNN.Y: self.Y_test})
## Optimizing and using gradient descent
epochs = 100
batch_size = 100
n_neurons_layer1 = 100
n_neurons_layer2 = 50
n_categories = 10
eta_vals = np.logspace(-5, 1, 7)
lmbd_vals = np.logspace(-5, 1, 7)
DNN_tf = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)
for i, eta in enumerate(eta_vals):
for j, lmbd in enumerate(lmbd_vals):
DNN = NeuralNetworkTensorflow(X_train, Y_train, X_test, Y_test,
n_neurons_layer1, n_neurons_layer2, n_categories,
epochs=epochs, batch_size=batch_size, eta=eta, lmbd=lmbd)
DNN.fit()
DNN_tf[i][j] = DNN
print("Learning rate = ", eta)
print("Lambda = ", lmbd)
print("Test accuracy: %.3f" % DNN.test_accuracy)
print()
# optional
# visual representation of grid search
# uses seaborn heatmap, could probably do this in matplotlib
import seaborn as sns
sns.set()
train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
test_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
for i in range(len(eta_vals)):
for j in range(len(lmbd_vals)):
DNN = DNN_tf[i][j]
train_accuracy[i][j] = DNN.train_accuracy
test_accuracy[i][j] = DNN.test_accuracy
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Training Accuracy")
ax.set_ylabel("$\eta$")
ax.set_xlabel("$\lambda$")
plt.show()
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(test_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Test Accuracy")
ax.set_ylabel("$\eta$")
ax.set_xlabel("$\lambda$")
plt.show()
# optional
# we can use log files to visualize our graph in Tensorboard
writer = tf.summary.FileWriter('logs/')
writer.add_graph(tf.get_default_graph())
## Using Keras
Keras is a high-level neural network [API](https://en.wikipedia.org/wiki/Application_programming_interface)
that supports Tensorflow, CNTK and Theano as backends.
If you have Tensorflow installed Keras is available through the *tf.keras* module.
If you have Anaconda installed you may run the following command
conda install keras
Alternatively, if you have Tensorflow or one of the other supported backends installed, you may use the pip package manager:
pip3 install keras
or look up the [instructions here](https://keras.io/).
from keras.models import Sequential
from keras.layers import Dense
from keras.regularizers import l2
from keras.optimizers import SGD
def create_neural_network_keras(n_neurons_layer1, n_neurons_layer2, n_categories, eta, lmbd):
model = Sequential()
model.add(Dense(n_neurons_layer1, activation='sigmoid', kernel_regularizer=l2(lmbd)))
model.add(Dense(n_neurons_layer2, activation='sigmoid', kernel_regularizer=l2(lmbd)))
model.add(Dense(n_categories, activation='softmax'))
sgd = SGD(lr=eta)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
return model
DNN_keras = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)
for i, eta in enumerate(eta_vals):
for j, lmbd in enumerate(lmbd_vals):
DNN = create_neural_network_keras(n_neurons_layer1, n_neurons_layer2, n_categories,
eta=eta, lmbd=lmbd)
DNN.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, verbose=0)
scores = DNN.evaluate(X_test, Y_test)
DNN_keras[i][j] = DNN
print("Learning rate = ", eta)
print("Lambda = ", lmbd)
print("Test accuracy: %.3f" % scores[1])
print()
# optional
# visual representation of grid search
# uses seaborn heatmap, could probably do this in matplotlib
import seaborn as sns
sns.set()
train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
test_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
for i in range(len(eta_vals)):
for j in range(len(lmbd_vals)):
DNN = DNN_keras[i][j]
train_accuracy[i][j] = DNN.evaluate(X_train, Y_train)[1]
test_accuracy[i][j] = DNN.evaluate(X_test, Y_test)[1]
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Training Accuracy")
ax.set_ylabel("$\eta$")
ax.set_xlabel("$\lambda$")
plt.show()
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(test_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Test Accuracy")
ax.set_ylabel("$\eta$")
ax.set_xlabel("$\lambda$")
plt.show()
## Which activation function should I use?
The Back propagation algorithm we derived above works by going from
the output layer to the input layer, propagating the error gradient on
the way. Once the algorithm has computed the gradient of the cost
function with regards to each parameter in the network, it uses these
gradients to update each parameter with a Gradient Descent (GD) step.
Unfortunately for us, the gradients often get smaller and smaller as the
algorithm progresses down to the first hidden layers. As a result, the
GD update leaves the lower layer connection weights
virtually unchanged, and training never converges to a good
solution. This is known in the literature as
**the vanishing gradients problem**.
In other cases, the opposite can happen, namely that the gradients grow bigger and
bigger. The result is that many of the layers get very large weight updates and the
algorithm diverges. This is the **exploding gradients problem**, which is
mostly encountered in recurrent neural networks. More generally, deep
neural networks suffer from unstable gradients; different layers may
learn at widely different speeds.
## Is the Logistic activation function (Sigmoid) our choice?
Although this unfortunate behavior has been empirically observed for
quite a while (it was one of the reasons why deep neural networks were
mostly abandoned for a long time), it is only around 2010 that
significant progress was made in understanding it.
A paper titled [Understanding the Difficulty of Training Deep
Feedforward Neural Networks by Xavier Glorot and Yoshua Bengio](http://proceedings.mlr.press/v9/glorot10a.html) traced these
problems to the popular logistic
sigmoid activation function and to the weight initialization technique
that was most popular at the time, namely random initialization using
a normal distribution with a mean of 0 and a standard deviation of
1.
They showed that with this activation function and this
initialization scheme, the variance of the outputs of each layer is
much greater than the variance of its inputs. Going forward in the
network, the variance keeps increasing after each layer until the
activation function saturates at the top layers. This is actually made
worse by the fact that the logistic function has a mean of 0.5, not 0
(the hyperbolic tangent function has a mean of 0 and behaves slightly
better than the logistic function in deep networks).
## The derivative of the Logistic function
Looking at the logistic activation function, when inputs become large
(negative or positive), the function saturates at 0 or 1, with a
derivative extremely close to 0. Thus when backpropagation kicks in,
it has virtually no gradient to propagate back through the network,
and what little gradient exists keeps getting diluted as
backpropagation progresses down through the top layers, so there is
really nothing left for the lower layers.
In their paper, Glorot and Bengio propose a way to significantly
alleviate this problem. We need the signal to flow properly in both
directions: in the forward direction when making predictions, and in
the reverse direction when backpropagating gradients. We don’t want
the signal to die out, nor do we want it to explode and saturate. For
the signal to flow properly, the authors argue that we need the
variance of the outputs of each layer to be equal to the variance of
its inputs, and we also need the gradients to have equal variance
before and after flowing through a layer in the reverse direction.
One of the insights in the 2010 paper by Glorot and Bengio was that
the vanishing/exploding gradients problems were in part due to a poor
choice of activation function. Until then most people had assumed that
if Nature had chosen to use roughly sigmoid activation functions in
biological neurons, they must be an excellent choice. But it turns out
that other activation functions behave much better in deep neural
networks, in particular the ReLU activation function, mostly because
it does not saturate for positive values (and also because it is quite
fast to compute).
## The ReLU function family
The ReLU activation function suffers from a problem known as the dying
ReLUs: during training, some neurons effectively die, meaning they
stop outputting anything other than 0.
In some cases, you may find that half of your network’s neurons are
dead, especially if you used a large learning rate. During training,
if a neuron’s weights get updated such that the weighted sum of the
neuron’s inputs is negative for every instance in the training set, it will start outputting 0. When this
happens, the neuron is unlikely to come back to life, since the gradient
of the ReLU function is 0 when its input is negative.
To solve this problem, nowadays practitioners use a variant of the ReLU
function, such as the leaky ReLU discussed above or the so-called
exponential linear unit (ELU) function
$$
ELU(z) = \left\{\begin{array}{cc} \alpha\left( \exp{(z)}-1\right) & z < 0,\\ z & z \ge 0.\end{array}\right.
$$
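For reference, here is a minimal NumPy sketch of ReLU, leaky ReLU and ELU; the default values $\alpha=0.01$ (leaky ReLU) and $\alpha=1$ (ELU) follow the common conventions mentioned below and are not the only possible choices.

```python
import numpy as np

def relu(z):
    return np.maximum(0.0, z)

def leaky_relu(z, alpha=0.01):
    return np.where(z >= 0.0, z, alpha * z)

def elu(z, alpha=1.0):
    return np.where(z >= 0.0, z, alpha * (np.exp(z) - 1.0))

z = np.array([-3.0, -1.0, 0.0, 1.0, 3.0])
# Unlike ReLU, leaky ReLU and ELU keep a nonzero slope for z < 0,
# which mitigates the dying-ReLU problem discussed above.
print(relu(z), leaky_relu(z), elu(z), sep="\n")
```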
## Which activation function should we use?
In general it seems that the ELU activation function is better than
the leaky ReLU function (and its variants), which is better than
ReLU. ReLU performs better than $\tanh$ which in turn performs better
than the logistic function.
If runtime
performance is an issue, then you may opt for the leaky ReLU function over the
ELU function. If you don’t
want to tweak yet another hyperparameter, you may just use the default
$\alpha$ of $0.01$ for the leaky ReLU, and $1$ for ELU. If you have
spare time and computing power, you can use cross-validation or
bootstrap to evaluate other activation functions.
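As a hedged sketch of such a comparison, one can cross-validate over the activation function with scikit-learn's `MLPClassifier`. Note that it only exposes the logistic, $\tanh$ and ReLU activations (no leaky ReLU or ELU), so for the full set discussed above one would switch to a framework such as TensorFlow/Keras or PyTorch; the dataset below is synthetic and purely illustrative.

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV   # recent scikit-learn versions
from sklearn.neural_network import MLPClassifier

# Synthetic, illustrative data set
X, y = make_classification(n_samples=500, n_features=20, random_state=0)

# 5-fold cross-validation over the activation function
search = GridSearchCV(
    MLPClassifier(hidden_layer_sizes=(32, 32), max_iter=500, random_state=0),
    param_grid={"activation": ["logistic", "tanh", "relu"]},
    cv=5,
)
search.fit(X, y)
print(search.best_params_, search.best_score_)
```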
## A top-down perspective on Neural networks
The first thing we would like to do is divide the data into two or three
parts: a training set, a validation or dev (development) set, and a
test set. The test set is the data on which we want to make
predictions. The dev set is a subset of the training data we use to
check how well we are doing out-of-sample, after training the model on
the training dataset. We use the validation error as a proxy for the
test error in order to make tweaks to our model. It is crucial that we
do not use any of the test data to train the algorithm. This is a
cardinal sin in ML. Then:
* Estimate the optimal error rate.
* Minimize underfitting (bias) on the training data set.
* Make sure you are not overfitting.
If the validation and test sets are drawn from the same distributions,
then a good performance on the validation set should lead to similarly
good performance on the test set.
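A minimal sketch of such a split with scikit-learn is shown below; the 60/20/20 proportions and the random data are illustrative choices made here, not recommendations from the text.

```python
import numpy as np
from sklearn.model_selection import train_test_split

# Illustrative random data
X = np.random.rand(1000, 10)
y = np.random.randint(0, 2, size=1000)

# First split off 40%, then split that half-and-half into validation and test sets
X_train, X_tmp, y_train, y_tmp = train_test_split(X, y, test_size=0.4, random_state=0)
X_val, X_test, y_val, y_test = train_test_split(X_tmp, y_tmp, test_size=0.5, random_state=0)

print(len(X_train), len(X_val), len(X_test))  # 600 200 200
# The model is fit on the training set, tuned on the validation set,
# and the test set is touched only once, for the final evaluation.
```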
However, sometimes
the training data and test data differ in subtle ways because, for
example, they are collected using slightly different methods, or
because it is cheaper to collect data in one way versus another. In
this case, there can be a mismatch between the training and test
data. This can lead to the neural network overfitting these small
differences between the test and training sets, and a poor performance
on the test set despite having a good performance on the validation
set. To rectify this, Andrew Ng suggests making two validation or dev
sets, one constructed from the training data and one constructed from
the test data. The difference between the performance of the algorithm
on these two validation sets quantifies the train-test mismatch. This
can serve as another important diagnostic when using DNNs for
supervised learning.
## Limitations of supervised learning with deep networks
Like all statistical methods, supervised learning using neural
networks has important limitations. This is particularly important to keep in mind when
one seeks to apply these methods to physics problems. Like
all tools, DNNs are not a universal solution. Often, the same or
better performance on a task can be achieved by using a few
hand-engineered features (or even a collection of random
features).
Here we list some of the important limitations of supervised neural network based models.
* **Need labeled data**. Like all supervised learning methods, DNNs require labeled data. Often, labeled data is harder to acquire than unlabeled data (e.g. one must pay human experts to label images).
* **Supervised neural networks are extremely data intensive.** DNNs are data hungry. They perform best when data is plentiful. This is doubly so for supervised methods where the data must also be labeled. The utility of DNNs is extremely limited if data is hard to acquire or the datasets are small (hundreds to a few thousand samples). In this case, the performance of other methods that utilize hand-engineered features can exceed that of DNNs.
* **Homogeneous data.** Almost all DNNs deal with homogeneous data of one type. It is very hard to design architectures that mix and match data types (i.e. some continuous variables, some discrete variables, some time series). In applications beyond images, video, and language, this is often what is required. In contrast, ensemble models like random forests or gradient-boosted trees have no difficulty handling mixed data types.
* **Many problems are not about prediction.** In natural science we are often interested in learning something about the underlying distribution that generates the data. In this case, it is often difficult to cast these ideas in a supervised learning setting. While the problems are related, it is possible to make good predictions with a *wrong* model. The model might or might not be useful for understanding the underlying science.
Some of these remarks are particular to DNNs, others are shared by all supervised learning methods. This motivates the use of unsupervised methods which in part circumvent these problems.
|
cc0-1.0
|
canast02/csci544_fall2016_project
|
yelp-sentiment/experiments/sentiment_maxent.py
|
1
|
2227
|
import numpy as np
from nltk import TweetTokenizer
from nltk.classify import MaxentClassifier
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import *
from sklearn.cross_validation import StratifiedKFold
from sentiment_util import remove_stopwords, load_datasets
def main():
x, y = load_datasets(["../datasets/sentiment_uci/yelp_labelled.txt"])
stopwords = set()
with open('../stopwords.txt', 'r') as f:
for w in f:
stopwords.add(w.strip())
tok = TweetTokenizer()
x = [remove_stopwords(tok.tokenize(s.lower()), stopwords) for s in x]
x = np.array(x)
accumulate = dict()
folds = 10
for train_idx, test_idx in StratifiedKFold(y=y, n_folds=folds, shuffle=True):
train_x, train_y = x[train_idx], y[train_idx]
test_x, test_y = x[test_idx], y[test_idx]
# train_x = [remove_stopwords(tok.tokenize(s), stopwords) for s in train_x]
# test_x = [remove_stopwords(tok.tokenize(s), stopwords) for s in test_x]
train_docs = [(sent, label) for sent, label in zip(train_x, train_y)]
test_docs = [(sent, label) for sent, label in zip(test_x, test_y)]
cls = SentimentAnalyzer()
# train
words_with_neg = cls.all_words([mark_negation(a) for a in train_x])
unigram_feats = cls.unigram_word_feats(words_with_neg)
# bigram_feats = cls.bigram_collocation_feats(train_x)
cls.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats, handle_negation=True)
# cls.add_feat_extractor(extract_bigram_feats, bigrams=bigram_feats)
training_set = cls.apply_features(train_docs, labeled=True)
cls.train(MaxentClassifier.train, training_set, max_iter=10, trace=0)
# test & evaluate
test_set = cls.apply_features(test_docs)
for key, value in sorted(cls.evaluate(test_set).items()):
print('\t{0}: {1}'.format(key, value))
accumulate.setdefault(key, 0.0)
accumulate[key] += value if value is not None else 0.0
print("Averages")
for key, value in sorted(accumulate.items()):
print('\tAverage {0}: {1}'.format(key, value/folds))
if __name__ == '__main__':
main()
|
gpl-3.0
|
mblondel/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
9
|
46546
|
"""
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.random import sample_without_replacement
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, return_indicator=True, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
"""Check classification on a toy dataset."""
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
"""Check classification on a weighted toy dataset."""
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
"""Check regression on a toy dataset."""
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
        assert_almost_equal(clf.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
"""Check on a XOR problem"""
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
"""Check consistency on dataset iris."""
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
"""Check consistency on dataset boston house prices."""
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
"""Predict probabilities using DecisionTreeClassifier."""
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
"""Check the array representation."""
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
"""Check when y is pure."""
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
"""Check numerical stability."""
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
"""Check if variable importance before fit raises ValueError. """
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
"""Check that gini is equivalent to mse for binary output variable"""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
"""Check max_features."""
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
"""Test that it gives proper exception on deficient input."""
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(Exception, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(Exception, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
def test_min_samples_leaf():
"""Test if leaves contain more than leaf_count training examples"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
"""Check that tree estimator are pickable """
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
"""Check estimators on multi-output problems."""
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
"""Test that n_classes_ and classes_ have proper shape."""
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
"""Check class rebalancing."""
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
"""Check that it works no matter the memory layout"""
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
"""Check sample weighting."""
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
"""Check sample weighting raises errors."""
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'auto' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='auto', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='auto', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight**2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
"""Test if class_weight raises errors and warnings when expected."""
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
"""Test greedy trees with max_depth + 1 leafs. """
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
"""Test preceedence of max_leaf_nodes over max_depth. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
"""Ensure property arrays' memory stays alive when tree disappears
non-regression for #2726
"""
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
"""Test if the warning for too large inputs is appropriate."""
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
# n_samples set n_feature to ease construction of a simultaneous
# construction of a csr and csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
|
bsd-3-clause
|
wheeler-microfluidics/dmf_control_board
|
dmf_control_board_firmware/gui/reference.py
|
3
|
13981
|
#!/usr/bin/env python
import os
from datetime import datetime
import tempfile
import pkg_resources
from path_helpers import path
import numpy as np
import pandas as pd
import gtk
from dmf_control_board_firmware import DMFControlBoard, Version
import matplotlib
from pygtkhelpers.delegates import WindowView, SlaveView
from pygtkhelpers.ui.form_view_dialog import create_form_view
from flatland.schema import String, Form, Integer, Boolean, Float
from flatland.validation import ValueAtLeast
from IPython.display import display
from dmf_control_board_firmware.calibrate.hv_attenuator import (
resistor_max_actuation_readings, fit_feedback_params,
update_control_board_calibration, plot_feedback_params)
from dmf_control_board_firmware.calibrate.oscope import (VISA_AVAILABLE,
AgilentOscope,
read_oscope as
read_oscope_)
from matplotlib.backends.backend_gtkagg import (FigureCanvasGTKAgg as
FigureCanvasGTK)
from matplotlib.backends.backend_gtkagg import (NavigationToolbar2GTKAgg as
NavigationToolbar)
from matplotlib.figure import Figure
if VISA_AVAILABLE:
import visa
class AssistantView(WindowView):
def __init__(self, control_board):
self.control_board = control_board
self.settings = {}
self.settings['frequency'] = self.control_board.waveform_frequency()
self.settings['voltage'] = self.control_board.waveform_voltage()
self.settings['amplifier_gain'] = self.control_board.amplifier_gain
self.settings['auto_adjust_amplifier_gain'] = \
self.control_board.auto_adjust_amplifier_gain
super(AssistantView, self).__init__(self)
def restore_settings(self):
for k in ('amplifier_gain', 'auto_adjust_amplifier_gain'):
setattr(self.control_board, k, self.settings[k])
self.control_board.set_waveform_frequency(self.settings['frequency'])
self.control_board.set_waveform_voltage(self.settings['voltage'])
def create_ui(self):
self.widget = gtk.Assistant()
self.widget.connect("prepare", self.assistant_prepared)
self.widget.connect("cancel", self.cancel_button_clicked)
self.widget.connect("close", self.close_button_clicked)
self.widget.connect("apply", self.apply_button_clicked)
# # Introduction #
box = gtk.HBox()
self.widget.append_page(box)
self.widget.set_page_type(box, gtk.ASSISTANT_PAGE_INTRO)
self.widget.set_page_title(box, "Introduction")
content = ('This wizard will guide you through the process of '
'calibrating the high-voltage reference load feedback '
'measurement circuit. This feedback circuit is used to '
                   'measure the output voltage of the amplifier on the control '
'board.\n\nSee '
r'<a href="http://microfluidics.utoronto.ca/trac/dropbot/wiki/Control board calibration#high-voltage-attenuation-calibration">'
'here</a> for more details.')
label = gtk.Label(content)
label.set_use_markup(True)
label.set_line_wrap(True)
image = gtk.Image()
img_path = pkg_resources.resource_filename(
'dmf_control_board_firmware', 'gui/reference_feedback_intro.png')
image.set_from_file(str(img_path))
box.pack_start(label, True, False, padding=15)
box.pack_start(image, True, True, padding=5)
self.widget.set_page_complete(box, True)
# # Connect hardware #
box = gtk.HBox()
self.widget.append_page(box)
self.widget.set_page_type(box, gtk.ASSISTANT_PAGE_CONTENT)
self.widget.set_page_title(box, "Connect hardware")
label = gtk.Label(' - Connect DropBot "<tt>Out to Amp</tt>" to amplifier input.\n'
' - Use T-splitter to connect amplifier output to:\n'
' 1) DropBot "<tt>In from Amp</tt>".\n'
' 2) Oscilloscope input.')
image = gtk.Image()
img_path = pkg_resources.resource_filename(
'dmf_control_board_firmware', 'gui/reference_feedback_setup.png')
image.set_from_file(str(img_path))
label.set_line_wrap(True)
label.set_use_markup(True)
box.pack_start(label, True, False, padding=15)
box.pack_start(image, True, True, padding=5)
self.widget.set_page_complete(box, True)
# # Select frequencies #
minimum = self.control_board.min_waveform_frequency
maximum = self.control_board.max_waveform_frequency
form = Form.of(
Integer.named('start_frequency').using(
default=minimum, optional=True,
validators=[ValueAtLeast(minimum=minimum), ]),
Integer.named('end_frequency').using(
default=maximum, optional=True,
validators=[ValueAtLeast(minimum=minimum), ]),
Integer.named('number_of_steps').using(
default=10, optional=True,
validators=[ValueAtLeast(minimum=2), ]),
)
box = gtk.HBox()
self.form_view = create_form_view(form)
self.form_view.form.proxies.connect('changed', display)
box.pack_start(self.form_view.widget, fill=False, padding=40)
self.widget.append_page(box)
self.widget.set_page_type(box, gtk.ASSISTANT_PAGE_CONTENT)
self.widget.set_page_title(box, "Select calibration frequencies")
self.widget.set_page_complete(box, True)
# # Record measurements #
box1 = gtk.VBox()
self.widget.append_page(box1)
self.widget.set_page_type(box1, gtk.ASSISTANT_PAGE_PROGRESS)
self.widget.set_page_title(box1, "Record measurements")
self.measurements_label = gtk.Label('Ready')
self.measurements_label.set_line_wrap(True)
self.measure_progress = gtk.ProgressBar()
self.measure_progress.set_size_request(300, 40)
box1.pack_start(self.measurements_label, True, True, 0)
box1.pack_start(self.measure_progress, expand=False, fill=False,
padding=15)
self.box1 = box1
# # Confirm fitted parameters #
box = gtk.VBox()
self.widget.append_page(box)
self.widget.set_page_type(box, gtk.ASSISTANT_PAGE_CONFIRM)
self.widget.set_page_title(box, "Confirm fitted parameters")
figure = Figure(figsize=(14, 8), dpi=60)
self.canvas = FigureCanvasGTK(figure)
toolbar = NavigationToolbar(self.canvas, self.widget)
self.axis = figure.add_subplot(111)
box.pack_start(self.canvas)
box.pack_start(toolbar, False, False)
self.widget.set_page_complete(box, True)
# # Summary #
box = gtk.VBox()
self.widget.append_page(box)
self.widget.set_page_type(box, gtk.ASSISTANT_PAGE_SUMMARY)
self.widget.set_page_title(box, "Summary")
label = gtk.Label('Calibration of reference load feedback circuit is '
'complete. The high-voltage output from amplifier '
'should now be measured accurately by the control '
'board.')
label.set_line_wrap(True)
box.pack_start(label, True, True, 0)
self.widget.set_page_complete(box, True)
def assistant_prepared(self, assistant, *args):
print 'Page %s prepared.' % assistant.get_current_page()
if assistant.get_current_page() < 3:
self.widget.set_page_complete(self.box1, False)
if assistant.get_current_page() == 3:
settings = dict([(f, self.form_view.form.fields[f].proxy
.get_widget_value())
for f in ('start_frequency', 'number_of_steps',
'end_frequency')])
start_frequency = np.array(settings['start_frequency'])
end_frequency = np.array(settings['end_frequency'])
number_of_steps = np.array(settings['number_of_steps'])
frequencies = np.logspace(np.log10(start_frequency),
np.log10(end_frequency), number_of_steps)
self.reset_measurement_count(frequencies)
gtk.idle_add(self.read_measurements, frequencies)
elif assistant.get_current_page() == 4:
self.fit_feedback_params()
display(self.fitted_params)
hw_version = Version.fromstring(self.control_board
.hardware_version()).major
plot_feedback_params(hw_version, self.hv_readings,
self.fitted_params, axis=self.axis)
@property
def resistor_count(self):
return len(self.control_board.a0_series_resistance)
def reset_measurement_count(self, frequencies):
self.measurement_count = len(frequencies) * self.resistor_count + 2
self.measurement_i = 0
self.measurements_label.set_label('Ready.')
def read_measurements(self, frequencies):
gtk.gdk.threads_enter()
self.reset_measurement_count(frequencies)
gtk.gdk.threads_leave()
try:
if VISA_AVAILABLE:
try:
oscope = AgilentOscope()
self._read_oscope = lambda: oscope.read_ac_vrms()
except visa.VisaIOError:
self._read_oscope = read_oscope_
else:
self._read_oscope = read_oscope_
self.hv_readings = resistor_max_actuation_readings(
self.control_board, frequencies, self.read_oscope)
gtk.gdk.threads_enter()
self.widget.set_page_complete(self.box1, True)
gtk.gdk.threads_leave()
except StopIteration:
self.measurements_label.set_label('Ready.')
self.widget.set_current_page(2)
finally:
self.restore_settings()
def read_oscope(self):
gtk.gdk.threads_enter()
result = self._read_oscope()
self.measurement_i += 1
self.measurements_label.set_label('Measuring amplifier voltage '
'readings...')
self.measure_progress.set_fraction(float(self.measurement_i) /
self.measurement_count)
self.measure_progress.set_text('Measurement: %s/%s' %
(self.measurement_i,
self.measurement_count))
gtk.gdk.threads_leave()
return result
def apply_button_clicked(self, assistant):
# Update the control board with the computed resistive and capacitive
# load values. The control board uses these values to compute `V1`
# using a transfer function when calculating the gain of the amplifier.
update_control_board_calibration(self.control_board,
self.fitted_params)
def close_button_clicked(self, assistant):
print("The 'Close' button has been clicked")
gtk.main_quit()
def cancel_button_clicked(self, assistant):
print("The 'Cancel' button has been clicked")
gtk.main_quit()
output_dir = path(tempfile.mkdtemp(prefix='dropbot-reference-calibration'))
timestamp = datetime.now().strftime('%Y-%m-%dT%Hh%Mm%S')
self.calibration_file = output_dir.joinpath('%s-calibration.h5' %
timestamp)
def fit_feedback_params(self):
# Using the collected measurements, fit the resistive and *(parasitic)*
# capacitive load values for the reference *(i.e., high-voltage)*
# feedback resistor ladder.
self.fitted_params = fit_feedback_params(self.control_board
.calibration,
self.hv_readings)
def to_hdf(self, output_path, complib='zlib', complevel=6):
# save control board meta data
proxy = self.control_board
df = {}
df['software_version'] = proxy.software_version()
df['hardware_version'] = proxy.hardware_version()
df['serial_number'] = str(proxy.serial_number)
df['aref'] = str(proxy.__aref__) # need to store as string or to_hdf
# will raise an error
for address, desc in sorted(proxy._i2c_devices.items()):
df['i2c address %d' % address] = desc
pd.Series(df).to_hdf(str(output_path),
'/feedback/reference/control_board_info',
format='t', complib=complib, complevel=complevel)
# Save measurements taken during calibration, along with input RMS
# voltage _(i.e., `V1`)_ values read using the oscilloscope.
self.hv_readings.to_hdf(str(output_path),
'/feedback/reference/measurements', format='t',
data_columns=self.hv_readings.columns,
complib=complib, complevel=complevel)
# Save fitted resistive and capacitive impedance values.
self.fitted_params.to_hdf(str(output_path),
'/feedback/reference/fitted_params',
format='t',
data_columns=self.fitted_params.columns,
complib=complib, complevel=complevel)
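# Illustration only (not part of the original calibration wizard): the `to_hdf`
# method above writes three tables under '/feedback/reference/'. The hypothetical
# helper below sketches how they could be read back with pandas; only the HDF
# keys are taken from `to_hdf`, everything else is an assumption.
def _load_reference_calibration(output_path):
    import pandas as pd
    info = pd.read_hdf(str(output_path), '/feedback/reference/control_board_info')
    measurements = pd.read_hdf(str(output_path), '/feedback/reference/measurements')
    fitted_params = pd.read_hdf(str(output_path), '/feedback/reference/fitted_params')
    return info, measurements, fitted_params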
if __name__ == '__main__':
control_board = DMFControlBoard()
control_board.connect()
view = AssistantView(control_board)
view.show_and_run()
|
gpl-3.0
|
VirusTotal/msticpy
|
msticpy/sectools/tiproviders/azure_sent_byoti.py
|
1
|
4854
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Azure Sentinel TI provider class.
Input can be a single IoC observable or a pandas DataFrame containing
multiple observables. Processing may require an API key, and
performance may be limited to a specific number of requests
per minute depending on your account type.
"""
from typing import Any, Dict, Tuple
import pandas as pd
from ..._version import VERSION
from ...common.utility import export
from .ti_provider_base import LookupResult, TISeverity
from .kql_base import KqlTIProvider
__version__ = VERSION
__author__ = "Ian Hellen"
@export
class AzSTI(KqlTIProvider):
"""Azure Sentinel TI provider class."""
_IOC_QUERIES: Dict[str, tuple] = {
"ipv4": ("ThreatIntelligence.list_indicators_by_ip", {"ioc": "observables"}),
"file_hash": (
"ThreatIntelligence.list_indicators_by_hash",
{"ioc": "observables"},
),
"windows_path": (
"ThreatIntelligence.list_indicators_by_filepath",
{"ioc": "observables"},
),
"dns": ("ThreatIntelligence.list_indicators_by_domain", {"ioc": "observables"}),
"url": ("ThreatIntelligence.list_indicators_by_url", {"ioc": "observables"}),
}
# aliases
_IOC_QUERIES["ipv6"] = _IOC_QUERIES["ipv4"]
_IOC_QUERIES["md5_hash"] = _IOC_QUERIES["file_hash"]
_IOC_QUERIES["sha1_hash"] = _IOC_QUERIES["file_hash"]
_IOC_QUERIES["sha256_hash"] = _IOC_QUERIES["file_hash"]
_IOC_QUERIES["linux_path"] = _IOC_QUERIES["windows_path"]
_IOC_QUERIES["hostname"] = _IOC_QUERIES["dns"]
def parse_results(self, response: LookupResult) -> Tuple[bool, TISeverity, Any]:
"""
Return the details of the response.
Parameters
----------
response : LookupResult
The returned data response
Returns
-------
Tuple[bool, TISeverity, Any]
bool = positive or negative hit
TISeverity = enumeration of severity
Object with match details
"""
if response.raw_result is None:
return False, TISeverity.information, "No data"
severity = TISeverity.warning
# if this is a series (single row) return a dictionary
if isinstance(response.raw_result, pd.Series):
extracted_data = response.raw_result[
["Action", "ThreatType", "ThreatSeverity", "Active", "ConfidenceScore"]
].to_dict()
if extracted_data["Action"].lower() in ["alert", "block"]:
severity = TISeverity.high
            return True, severity, extracted_data
# if this is a dataframe (multiple rows)
# concatenate the values for each column/record into a list
# and return as a dictionary
if isinstance(response.raw_result, pd.DataFrame):
d_frame = response.raw_result
if d_frame["Action"].str.lower().isin(["alert", "block"]).any():
severity = TISeverity.high
return (
True,
severity,
{
"Action": self._series_to_list(d_frame["Action"]),
"ThreatType": self._series_to_list(d_frame["ThreatType"]),
"ThreatSeverity": self._series_to_list(d_frame["ThreatSeverity"]),
"Active": self._series_to_list(d_frame["Active"]),
"Description": self._series_to_list(d_frame["Description"]),
"ConfidenceScore": self._series_to_list(d_frame["ConfidenceScore"]),
},
)
return False, TISeverity.information, "No data"
@staticmethod
def _get_detail_summary(data_result: pd.DataFrame) -> pd.Series:
# For the input frame return details in a series with
# Details in dict
return data_result.apply(
lambda x: {
"Action": x.Action,
"ThreatType": x.ThreatType,
"ThreatSeverity": x.ThreatSeverity,
"Active": x.Active,
"Description": x.Description,
"ConfidenceScore": x.ConfidenceScore,
},
axis=1,
)
@staticmethod
def _get_severity(data_result: pd.DataFrame) -> pd.Series:
# For the input frame return severity in a series
return data_result.apply(
lambda x: TISeverity.high.value
if x.Action.lower() in ["alert", "block"]
else TISeverity.warning.value,
axis=1,
)
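# Illustration only (not part of msticpy): a minimal, self-contained sketch of
# the severity rule implemented by `parse_results` and `_get_severity` above --
# a hit is promoted from 'warning' to 'high' severity when any indicator row
# has an Action of 'alert' or 'block'. The sample frame below is invented;
# the column names mirror the ones used above.
def _severity_rule_sketch() -> TISeverity:
    sample = pd.DataFrame({"Action": ["allow", "alert"], "ThreatType": ["Botnet", "C2"]})
    if sample["Action"].str.lower().isin(["alert", "block"]).any():
        return TISeverity.high
    return TISeverity.warning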
|
mit
|
thekatiebr/NEMO_II
|
Classifiers/KnowledgeIntegrator.py
|
1
|
9469
|
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from KnowledgeBase import KnowledgeBase
from Classifiers import ML_Controller
from collections import deque
from sklearn.utils import shuffle
from sklearn.metrics import classification_report,confusion_matrix, accuracy_score, precision_score, f1_score, recall_score
import pandas
#import NEMO
import MySQLdb
import threading
import sys
import os
import time
#git test for git
class KnowledgeIntegrator:
def __init__(self, kb, level1_classifiers, stacking_classifier=None, other_predictions=None, use_features=False):
self.kb = kb
self.level1_classifiers = level1_classifiers
if stacking_classifier is None or stacking_classifier == "Logistic Regression":
self.algorithm_name = "KI_LogisticRegression"
self.stacking_classifier = LogisticRegression()
elif stacking_classifier == "Decision Tree":
self.stacking_classifier = DecisionTreeClassifier()
self.algorithm_name = "KI_DecisionTree"
elif stacking_classifier == "SVM":
self.stacking_classifier = SVC()
self.algorithm_name = "KI_SVM"
self.meta_data_set = []
self.other_predictions = other_predictions
#self.keys.append(self.kb.Y)
self.algorithm_id = "000000000"
self.use_features = use_features
def trainLevelOneModels(self, fold):
xtrain, xtest, ytrain, ytest = fold
if self.other_predictions is not None:
split = self.splitIntoAttributesOther(xtrain)
xtrain = split[0]
other_train = split[1]
split = self.splitIntoAttributesOther(xtest)
xtest = split[0]
other_test = split[1]
for classifier in self.level1_classifiers:
classifier.createModelPreSplit(xtrain, xtest, ytrain, ytest)
def evaluateLevelOneModels(self, x):
to_return = [] #holds list of predictions and truth
for classifier in self.level1_classifiers:
curr = classifier.predict(x)
# print curr
# print ""
to_return.append(curr)
return to_return
def trainAndCreateMetaDataSet(self, folds):
self.resetKeys()
names = []
names = self.keys
names.append(self.kb.Y)
self.meta_data_set = []
for fold in folds:
xtrain, xtest, ytrain, ytest = fold
other_train = None
other_test = None
#strip the other_predictions -> other_predictions_train, other_predictions_test
if self.other_predictions is not None:
split = self.splitIntoAttributesOther(xtrain)
xtrain = split[0]
other_train = split[1]
split = self.splitIntoAttributesOther(xtest)
xtest = split[0]
other_test = split[1]
self.trainLevelOneModels(fold)
predictions = self.evaluateLevelOneModels(xtest)
#append the other_predictions_test
if self.other_predictions is not None:
#print other_test
predictions.append(other_test.values)
#print pandas.DataFrame(predictions).T
predictions.append(ytest.values)
predictions = pandas.DataFrame(predictions).T
#print predictions
predictions.columns = names
if self.use_features:
predictions.index = xtest.index
#print predictions
predictions = predictions.merge(xtest, left_index=True, right_index=True)
# print predictions
self.meta_data_set.append(predictions)
self.meta_data_set = pandas.concat(self.meta_data_set)
def createMetaDataSet(self, folds):
self.resetKeys()
names = self.keys
names.append(self.kb.Y)
set = []
for fold in folds:
xtrain, xtest, ytrain, ytest = fold
other_train = None
other_test = None
#strip other_predictions into other_predictions_train, other_predictions_test
if self.other_predictions is not None:
split = self.splitIntoAttributesOther(xtrain)
xtrain = split[0]
other_train = split[1]
split = self.splitIntoAttributesOther(xtest)
xtest = split[0]
other_test = split[1]
predictions = self.evaluateLevelOneModels(xtest)
if self.other_predictions is not None:
predictions.append(other_test.values)
predictions.append(ytest.values)
predictions = pandas.DataFrame(predictions).T
predictions.columns = names
if self.use_features:
predictions.index = xtest.index
#print predictions
predictions = predictions.merge(xtest, left_index=True, right_index=True)
# print predictions
self.meta_data_set.append(predictions)
self.meta_data_set = pandas.DataFrame(self.meta_data_set)
def trainMetaModel(self, data=None):
if data is None:
data = self.meta_data_set
# print data
# print "Data"
# print data
x,y,features = self.splitMetaIntoXY(data)
#print x
x = self.transform(x)
if self.use_features:
x.index = features.index
x = x.merge(features, right_index = True, left_index = True)
print "x"
print x
self.stacking_classifier.fit(x, y)
def transform(self, x):
le = preprocessing.LabelEncoder()
#LabelEncoder()
le.fit([ "c34.0", "c34.1", "c34.2", "c34.3", "c34.9", "c50.1", "c50.2", "c50.3", "c50.4", "c50.5", "c50.8", "c50.9"])
new_x = []
for column in x:
#print column
column = x[column]
#print column
new_x.append(le.transform(column))
x = pandas.DataFrame(new_x)
return x.T
def runModel(self, data):
print len(data)
print "data"
#print data
x,y = self.splitIntoXY(data)
#strip the other_predictions
other = None
if self.other_predictions is not None:
split = self.splitIntoAttributesOther(x)
x = split[0]
other = split[1]
predictions = self.evaluateLevelOneModels(x)
#append other_predictions
if self.other_predictions is not None:
predictions.append(other.values)
predictions.append(y.values)
self.resetKeys()
names = self.keys
names.append(self.kb.Y)
print len(self.meta_data_set)
self.meta_data_set = pandas.DataFrame(predictions).T
self.meta_data_set.columns = names
if self.use_features:
self.meta_data_set.index = x.index
self.meta_data_set = self.meta_data_set.merge(x, left_index=True, right_index=True)
print self.meta_data_set
x,y,features = self.splitMetaIntoXY(self.meta_data_set)
print "y"
print y
print "features"
print features
x = self.transform(x)
print "x"
print x
if self.use_features:
x.index = features.index
x = x.merge(features, right_index = True, left_index = True)
print x
predictions = self.stacking_classifier.predict(x)
av = 'micro'
accuracy = accuracy_score(y,predictions)
precision = precision_score(y,predictions, average=av)
recall = recall_score(y, predictions, average=av)
f1 = f1_score(y,predictions, average=av)
cm = confusion_matrix(y,predictions)
#to_return = {"Accuracy": accuracy, "Precision": precision, "Recall": recall, "F1": f1, "Confusion_Matrix": cm}
to_return = {"Accuracy": accuracy, "Precision": precision, "Recall": recall, "F1": f1, "Confusion_Matrix": cm}
return to_return
def testKI(self, splits, num_folds, random_seed):
print "in test KI"
print self.meta_data_set
self.meta_data_set = []
holdout = splits.pop()
remain = pandas.concat(splits)
folded_data = deque(self.splitIntoFolds(remain, num_folds, random_seed))
folds = []
for i in range(0, num_folds):
curr = folded_data.popleft()
info = self.getTestTraining(curr, folded_data)
folds.append(info)
folded_data.append(curr)
#print len(folds
self.trainAndCreateMetaDataSet(folds)
self.trainMetaModel()
xtrain, ytrain = self.splitIntoXY(remain)
fold = (xtrain, None, ytrain, None)
self.trainLevelOneModels(fold)
curr_res = self.runModel(holdout)
print "Holdout Results: " + str(curr_res)
curr_res["ID"] = self.algorithm_id
curr_res["Name"] = self.algorithm_name
self.results = curr_res
return curr_res
def splitIntoFolds(self, data, k, seed):
shuffled_data = shuffle(data, random_state=seed)
#print shuffled_data
folds = []
num_in_folds = len(data) / k
start = 0
end = num_in_folds - 1
for i in range(0,k):
fold = shuffled_data.iloc[start:end]
start = end
end = end + num_in_folds - 1
#print fold
folds.append(self.splitIntoXY(fold))
return folds
def getTestTraining(self, curr, others):
xtest = curr[0]
ytest = curr[1]
xtrainsets = []
ytrainsets = []
for curr in others:
xtrainsets.append(pandas.DataFrame(curr[0]))
ytrainsets.append(pandas.DataFrame(curr[1]))
xtrain = pandas.concat(xtrainsets)
ytrain = pandas.concat(ytrainsets)
return xtrain, xtest, ytrain, ytest
def crossValidateMetaModel(self, k):
pass
def getName(self):
return self.algorithm_name
def splitMetaIntoXY(self, data):
self.resetKeys()
#print data
y = data[self.kb.Y]
x = data[self.keys]
try:
x_cols = list(set(self.kb.X) - set(self.keys))
features = data[x_cols]
except:
features = None
return(x,y,features)
def splitIntoAttributesOther(self, data):
if data is not None:
atr = list(set(self.kb.X) - set(self.other_predictions))
x = data[atr]
other = data[self.other_predictions]
return(x,other)
else:
return (None, None)
def splitIntoXY(self, data):
#print data
#print(data.columns.tolist())
y = data[self.kb.Y] #need to change to reflect varying data...
#print y
x = data[self.kb.X]
#print x
return (x,y)
def resetKeys(self):
self.keys = []
for classifier in self.level1_classifiers:
key = classifier.getName() + "_" + classifier.getID()
self.keys.append(key)
if self.other_predictions is not None:
for name in self.other_predictions:
self.keys.append(name)
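# Illustration only (not part of NEMO): a compact stacking sketch in plain
# scikit-learn showing the same idea the KnowledgeIntegrator implements by
# hand -- out-of-fold predictions of the level-1 classifiers become the
# features of the level-2 (stacking) model. The data here is synthetic.
def _stacking_sketch():
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.model_selection import cross_val_predict
    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    level1 = [DecisionTreeClassifier(random_state=0), SVC(random_state=0)]
    # out-of-fold class predictions from each level-1 model -> meta features
    meta = np.column_stack([cross_val_predict(clf, X, y, cv=5) for clf in level1])
    stacker = LogisticRegression().fit(meta, y)
    return stacker.score(meta, y)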
|
apache-2.0
|
btabibian/scikit-learn
|
examples/covariance/plot_outlier_detection.py
|
36
|
5023
|
"""
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates four
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define the outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
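# Illustration only: the thresholding step described in the module docstring,
# in isolation -- with a known contamination fraction, the inlier/outlier
# cutoff is simply the corresponding percentile of the decision_function
# scores. The scores below are synthetic.
def _threshold_sketch(outliers_fraction=0.25):
    scores = rng.randn(1000)
    threshold = stats.scoreatpercentile(scores, 100 * outliers_fraction)
    return scores >= threshold  # True for points treated as inliers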
|
bsd-3-clause
|
zaxtax/scikit-learn
|
sklearn/datasets/tests/test_lfw.py
|
55
|
7877
|
"""This test for the LFW require medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
bsd-3-clause
|
jfsehuanes/thunderfish
|
thunderfish/harmonicgroups.py
|
1
|
53719
|
"""Functions for extracting harmonic groups from a power spectrum.
harmonic_groups(): detect peaks in a power spectrum and groups them
according to their harmonic structure.
extract_fundamentals(): collect harmonic groups from lists of power spectrum peaks.
threshold_estimate(): estimates thresholds for peak detection in a power spectrum.
fundamental_freqs(): extract the fundamental frequencies from lists of harmonic groups
as returned by harmonic_groups().
colors_markers(): Generate a list of colors and markers for plotting.
plot_harmonic_groups(): Mark decibel power of fundamentals and their harmonics.
plot_psd_harmonic_groups(): Plot decibel power-spectrum with detected peaks, harmonic groups, and mains frequencies.
"""
from __future__ import print_function
import numpy as np
from .peakdetection import detect_peaks, accept_peaks_size_width, hist_threshold
from .powerspectrum import decibel, plot_decibel_psd
try:
import matplotlib.pyplot as plt
import matplotlib.colors as mc
except ImportError:
pass
def build_harmonic_group(freqs, more_freqs, deltaf, verbose=0, min_freq=20.0, max_freq=2000.0,
freq_tol_fac=0.7, max_divisor=4, max_upper_fill=1,
max_double_use_harmonics=8, max_double_use_count=1,
max_fill_ratio=0.25, power_n_harmonics=10, **kwargs):
"""Find all the harmonics belonging to the largest peak in a list of frequency peaks.
Args:
freqs (2-D numpy array): list of frequency, height, size, width and count of
strong peaks in a power spectrum.
more_freqs (): list of frequency, height, size, width and count of
all peaks in a power spectrum.
deltaf (float): frequency resolution of the power spectrum
verbose (int): verbosity level
min_freq (float): minimum frequency accepted as a fundamental frequency
max_freq (float): maximum frequency accepted as a fundamental frequency
freq_tol_fac (float): harmonics need to fall within deltaf*freq_tol_fac
max_divisor (int): maximum divisor used for checking for sub-harmonics
max_upper_fill (int): maximum number of frequencies that are allowed to be filled in
(i.e. they are not contained in more_freqs) above the frequency of the
largest peak in freqs for constructing a harmonic group.
max_double_use_harmonics (int): maximum harmonics for which double uses of peaks
are counted.
max_double_use_count (int): maximum number of harmonic groups a single peak can be part of.
max_fill_ratio (float): maximum allowed fraction of filled in frequencies.
power_n_harmonics (int): maximum number of harmonics over which the total power
of the signal is computed.
Returns:
freqs (2-D numpy array): list of strong frequencies with the frequencies of group removed
more_freqs (2-D numpy array): list of all frequencies with updated double-use counts
group (2-D numpy array): the detected harmonic group. Might be empty.
best_fzero_harmonics (int): the highest harmonics that was used to recompute
the fundamental frequency
fmax (float): the frequency of the largest peak in freqs for which the harmonic group was detected.
"""
# start at the strongest frequency:
fmaxinx = np.argmax(freqs[:, 1])
fmax = freqs[fmaxinx, 0]
if verbose > 1:
print('')
print(70 * '#')
print('freqs: ', '[', ', '.join(['{:.2f}'.format(f) for f in freqs[:, 0]]), ']')
print('more_freqs:', '[', ', '.join(
['{:.2f}'.format(f) for f in more_freqs[:, 0] if f < max_freq]), ']')
print('## fmax is: {0: .2f}Hz: {1:.5g} ##\n'.format(fmax, np.max(freqs[:, 1])))
# container for harmonic groups
best_group = list()
best_moregroup = list()
best_group_peaksum = 0.0
best_group_fill_ins = 1000000000
best_divisor = 0
best_fzero = 0.0
best_fzero_harmonics = 0
freqtol = freq_tol_fac * deltaf
# ###########################################
# SEARCH FOR THE REST OF THE FREQUENCY GROUP
# start with the strongest fundamental and try to gather the full group of available harmonics
# In order to find the fundamental frequency of a harmonic group,
# we divide fmax (the strongest frequency in the spectrum)
# by a range of integer divisors.
# We do this, because fmax could just be a strong harmonic of the harmonic group
for divisor in range(1, max_divisor + 1):
# define the hypothesized fundamental, which is compared to all higher frequencies:
fzero = fmax / divisor
# fzero is not allowed to be smaller than our chosen minimum frequency:
# if divisor > 1 and fzero < min_freq: # XXX why not also for divisor=1???
# break
fzero_harmonics = 1
if verbose > 1:
print('# divisor:', divisor, 'fzero=', fzero)
# ###########################################
# SEARCH ALL DETECTED FREQUENCIES in freqs
# this in the end only recomputes fzero!
newgroup = list()
npre = -1 # previous harmonics
ndpre = 0.0 # difference of previous frequency
connected = True
for j in range(freqs.shape[0]):
if verbose > 2:
print('check freq {:3d} {:8.2f} '.format(j, freqs[j, 0]), end='')
# IS THE CURRENT FREQUENCY AN INTEGRAL MULTIPLE OF FZERO?
# divide the frequency-to-be-checked by fzero
# to get the multiplication factor between freq and fzero
n = np.round(freqs[j, 0] / fzero)
if n == 0:
if verbose > 2:
print('discarded: n == 0')
continue
# !! the difference between the current frequency, divided by the derived integer,
# and fzero should be very very small: 1 resolution step of the fft
# (freqs[j,0] / n) = should be fzero, plus minus a little tolerance,
# which is the fft resolution
nd = np.abs((freqs[j, 0] / n) - fzero)
# ... compare it to our tolerance
if nd > freqtol:
if verbose > 2:
print('discarded: not a harmonic n=%2d d=%5.2fHz tol=%5.2fHz' % (n, nd, freqtol))
continue
# two succeeding frequencies should also differ by
# fzero plus/minus twice! the tolerance:
if len(newgroup) > 0:
nn = np.round((freqs[j, 0] - freqs[newgroup[-1], 0]) / fzero)
if nn == 0:
# the current frequency is the same harmonic as the previous one
# print(divisor, j, freqs[j,0], freqs[newgroup[-1],0])
if len(newgroup) > 1:
# check whether the current frequency is fzero apart from the previous harmonics:
nn = np.round((freqs[j, 0] - freqs[newgroup[-2], 0]) / fzero)
nnd = np.abs(((freqs[j, 0] - freqs[newgroup[-2], 0]) / nn) - fzero)
if nnd > 2.0 * freqtol:
if verbose > 2:
print('discarded: distance to previous harmonics %2d %5.2f %5.2f %8.2f'
% (nn, nnd, freqtol, fzero))
continue
if ndpre < nd:
# the previous frequency is closer to the harmonics, keep it:
if verbose > 2:
print('discarded: previous harmonics is closer %2d %5.2f %5.2f %5.2f %8.2f' %
(n, nd, ndpre, freqtol, fzero))
continue
else:
# the current frequency is closer to the harmonics, remove the previous one:
newgroup.pop()
else:
# check whether the current frequency is fzero apart from the previous harmonics:
nnd = np.abs(((freqs[j, 0] - freqs[newgroup[-1], 0]) / nn) - fzero)
if nnd > 2.0 * freqtol:
if verbose > 2:
print('discarded: distance to previous harmonics %2d %5.2f %5.2f %8.2f' %
(nn, nnd, freqtol, fzero))
continue
# take frequency:
newgroup.append(j) # append index of frequency
if verbose > 2:
print('append n={:.2f} d={:5.2f}Hz tol={:5.2f}Hz'.format(freqs[j, 0] / fzero, nd, freqtol))
if npre >= 0 and n - npre > 1:
connected = False
npre = n
ndpre = nd
if connected:
# adjust fzero as we get more information from the higher frequencies:
fzero = freqs[j, 0] / n
fzero_harmonics = int(n)
if verbose > 2:
print('adjusted fzero to', fzero)
if verbose > 3:
print('newgroup:', divisor, fzero, newgroup)
newmoregroup = list()
fill_ins = 0
double_use = 0
ndpre = 0.0 # difference of previous frequency
# ###########################################
# SEARCH ALL DETECTED FREQUENCIES in morefreqs
for j in range(more_freqs.shape[0]):
if verbose > 3:
print('check more_freq %3d %8.2f ' % (j, more_freqs[j, 0]), end='')
# IS FREQUENCY A AN INTEGRAL MULTIPLE OF FREQUENCY B?
# divide the frequency-to-be-checked with fzero:
# what is the multiplication factor between freq and fzero?
n = np.round(more_freqs[j, 0] / fzero)
if n == 0:
if verbose > 3:
print('discarded: n == 0')
continue
# !! the difference between the detection, divided by the derived integer
# , and fzero should be very very small: 1 resolution step of the fft
# (more_freqs[j,0] / n) = should be fzero, plus minus a little tolerance,
# which is the fft resolution
nd = np.abs((more_freqs[j, 0] / n) - fzero)
# ... compare it to our tolerance
if nd > freqtol:
if verbose > 3:
print('discarded: not a harmonic n=%2d d=%5.2fHz tol=%5.2fHz' % (n, nd, freqtol))
continue
# two succeeding frequencies should also differ by fzero plus/minus tolerance:
if len(newmoregroup) > 0:
nn = np.round((more_freqs[j, 0] - more_freqs[newmoregroup[-1], 0]) / fzero)
if nn == 0:
# the current frequency is close to the same harmonic as the previous one
# print(n, newmoregroup[-1], ( more_freqs[j,0] - more_freqs[newmoregroup[-1],0] )/fzero)
# print(divisor, j, n, more_freqs[j,0], more_freqs[newmoregroup[-1],0], more_freqs[newmoregroup[-2],0], newmoregroup[-2])
if len(newmoregroup) > 1 and newmoregroup[-2] >= 0:
# check whether the current frequency is fzero apart from the previous harmonics:
nn = np.round((more_freqs[j, 0] - more_freqs[newmoregroup[-2], 0]) / fzero)
nnd = np.abs(((more_freqs[j, 0] - more_freqs[newmoregroup[-2], 0]) / nn) - fzero)
if nnd > 2.0 * freqtol:
if verbose > 3:
print('discarded: distance to previous harmonics %2d %5.2f %5.2f %8.2f' %
(nn, nnd, freqtol, fzero))
continue
if ndpre < nd:
# the previous frequency is closer to the harmonics, keep it:
if verbose > 3:
print('discarded: previous harmonics is closer %2d %5.2f %5.2f %5.2f %8.2f' %
(n, nd, ndpre, freqtol, fzero))
continue
else:
# the current frequency is closer to the harmonics, remove the previous one:
newmoregroup.pop()
else:
# check whether the current frequency is fzero apart from the previous harmonics:
nnd = np.abs(((more_freqs[j, 0] - more_freqs[newmoregroup[-1], 0]) / nn) - fzero)
if nnd > 2.0 * freqtol:
if verbose > 3:
print('discarded: distance to previous harmonics %2d %5.2f %5.2f %8.2f' %
(nn, nnd, freqtol, fzero))
continue
ndpre = nd
# too many fill-ins upstream of fmax ?
if more_freqs[j, 0] > fmax and n - 1 - len(newmoregroup) > max_upper_fill:
# finish this group immediately
if verbose > 3:
print('stopping group: too many upper fill-ins:', n - 1 - len(newmoregroup), '>',
max_upper_fill)
break
# fill in missing harmonics:
while len(newmoregroup) < n - 1: # while some harmonics are missing ...
newmoregroup.append(-1) # ... add marker for non-existent harmonic
fill_ins += 1
# count double usage of frequency:
if n <= max_double_use_harmonics:
double_use += more_freqs[j, 4]
if verbose > 3 and more_freqs[j, 4] > 0:
print('double use of %.2fHz ' % more_freqs[j, 0], end='')
# take frequency:
newmoregroup.append(j)
if verbose > 3:
print('append')
# double use of points:
if double_use > max_double_use_count:
if verbose > 1:
print('discarded group because of double use:', double_use)
continue
# ratio of total fill-ins too large:
if float(fill_ins) / float(len(newmoregroup)) > max_fill_ratio:
if verbose > 1:
print('discarded group because of too many fill ins! %d from %d (%g)' %
(fill_ins, len(newmoregroup), float(fill_ins) / float(len(newmoregroup))), newmoregroup)
continue
# REASSEMBLE NEW GROUP BECAUSE FZERO MIGHT HAVE CHANGED AND
# CALCULATE THE PEAKSUM, GIVEN THE UPPER LIMIT
# DERIVED FROM morefreqs which can be low because of too many fill ins.
# newgroup is needed to delete the right frequencies from freqs later on.
newgroup = []
fk = 0
for j in range(len(newmoregroup)):
if newmoregroup[j] >= 0:
# existing frequency peak:
f = more_freqs[newmoregroup[j], 0]
# find this frequency in freqs:
for k in range(fk, freqs.shape[0]):
if np.abs(freqs[k, 0] - f) < 1.0e-8:
newgroup.append(k)
fk = k + 1
break
if fk >= freqs.shape[0]:
break
# fmax might not be in our group, because we adjust fzero:
if not fmaxinx in newgroup:
if verbose > 1:
print("discarded: lost fmax")
continue
n = power_n_harmonics
newmoregroup_peaksum = np.sum(more_freqs[newmoregroup[:n], 1])
fills = np.sum(np.asarray(newmoregroup[:len(best_moregroup)]) < 0)
best_fills = np.sum(np.asarray(best_moregroup[:len(newmoregroup)]) < 0)
takes = np.sum(np.asarray(newmoregroup) >= 0)
best_takes = np.sum(np.asarray(best_moregroup) >= 0)
if verbose > 1:
print('newgroup: divisor={d}, fzero={fz:.2f}Hz, peaksum={ps}, fills={f}, takes={t}'.format(d=divisor,
fz=fzero,
ps=newmoregroup_peaksum,
f=fills,
t=takes),
newgroup)
print('newmoregroup: divisor={d}, fzero={fz:.2f}Hz, peaksum={ps}, fills={f}, takes={t}'.format(d=divisor,
fz=fzero,
ps=newmoregroup_peaksum,
f=fills,
t=takes),
newmoregroup)
if verbose > 2:
print('bestgroup: divisor={d}, fzero={fz:.2f}Hz, peaksum={ps}, fills={f}, takes={t}'.format(
d=best_divisor, fz=best_fzero, ps=best_group_peaksum, f=best_fills, t=best_takes), best_group)
# TAKE THE NEW GROUP IF BETTER:
# sum of peak power must be larger and
# less fills. But if the new group has more takes,
# this might compensate for more fills.
if newmoregroup_peaksum > best_group_peaksum \
and fills - best_fills <= 0.5 * (takes - best_takes):
best_group_peaksum = newmoregroup_peaksum
if len(newgroup) == 1:
best_group_fill_ins = np.max((2, fill_ins)) # give larger groups a chance XXX we might reduce this!
else:
best_group_fill_ins = fill_ins
best_group = newgroup
best_moregroup = newmoregroup
best_divisor = divisor
best_fzero = fzero
best_fzero_harmonics = fzero_harmonics
if verbose > 2:
print('new bestgroup: divisor={d}, fzero={fz:.2f}Hz, peaksum={ps}, fills={f}, takes={t}'.format(
d=best_divisor, fz=best_fzero, ps=best_group_peaksum, f=best_fills, t=best_takes), best_group)
print('new bestmoregroup: divisor={d}, fzero={fz:.2f}Hz, peaksum={ps}, fills={f}, takes={t}'.format(
d=best_divisor, fz=best_fzero, ps=best_group_peaksum, f=best_fills, t=best_takes), best_moregroup)
elif verbose > 1:
print('took as new best group')
# ##############################################################
# no group found:
if len(best_group) == 0:
# erase fmax:
freqs = np.delete(freqs, fmaxinx, axis=0)
group = np.zeros((0, 5))
return freqs, more_freqs, group, 1, fmax
# group found:
if verbose > 2:
print('')
print('## best groups found for fmax={fm:.2f}Hz: fzero={fz:.2f}Hz, d={d:d}:'.format(fm=fmax, fz=best_fzero,
d=best_divisor))
print('## bestgroup: ', best_group, '[', ', '.join(['{:.2f}'.format(f) for f in freqs[best_group, 0]]), ']')
print('## bestmoregroup: ', best_moregroup, '[', ', '.join(
['{:.2f}'.format(f) for f in more_freqs[best_moregroup, 0]]), ']')
# fill up group:
group = np.zeros((len(best_moregroup), 5))
for i, inx in enumerate(best_moregroup):
# increment double use counter:
more_freqs[inx, 4] += 1.0
if inx >= 0:
group[i, :] = more_freqs[inx, :]
# take adjusted peak frequencies:
group[i, 0] = (i + 1) * best_fzero
if verbose > 1:
refi = np.nonzero(group[:, 1] > 0.0)[0][0]
print('')
print('# resulting harmonic group for fmax=', fmax)
for i in range(group.shape[0]):
print('{0:8.2f}Hz n={1:5.2f}: p={2:10.3g} p/p0={3:10.3g}'.format(group[i, 0], group[i, 0] / group[0, 0],
group[i, 1], group[i, 1] / group[refi, 1]))
# erase from freqs:
for inx in reversed(best_group):
freqs = np.delete(freqs, inx, axis=0)
# freqs: removed all frequencies of bestgroup
# more_freqs: updated double use count
# group: the group
# fmax: fmax
return freqs, more_freqs, group, best_fzero_harmonics, fmax
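# Illustration only (not part of thunderfish): the core acceptance test used in
# build_harmonic_group() above -- a peak at frequency f is taken as the n-th
# harmonic of a candidate fundamental fzero if |f/n - fzero| stays within the
# frequency tolerance freqtol (= freq_tol_fac * deltaf).
def _is_harmonic_sketch(f, fzero, freqtol):
    n = np.round(f / fzero)
    return n > 0 and np.abs(f / n - fzero) <= freqtol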
def extract_fundamentals(good_freqs, all_freqs, deltaf, verbose=0, freq_tol_fac=0.7,
mains_freq=60.0, min_freq=0.0, max_freq=2000.0,
max_divisor=4, max_upper_fill=1,
max_double_use_harmonics=8, max_double_use_count=1,
max_fill_ratio=0.25, power_n_harmonics=10,
min_group_size=3, max_harmonics=0, **kwargs):
"""Extract fundamental frequencies from power-spectrum peaks.
Args:
good_freqs (2-D numpy array): list of frequency, height, size, width and count of
strong peaks in a power spectrum.
all_freqs (2-D numpy array): list of frequency, height, size, width and count of
all peaks in a power spectrum.
deltaf (float): frequency resolution of the power spectrum
verbose (int): verbosity level
freq_tol_fac (float): harmonics need to fall within deltaf*freq_tol_fac
mains_freq (float): frequency of the mains power supply.
min_freq (float): minimum frequency accepted as a fundamental frequency
max_freq (float): maximum frequency accepted as a fundamental frequency
max_divisor (int): maximum divisor used for checking for sub-harmonics
max_upper_fill (int): maximum number of frequencies that are allowed to be filled in
(i.e. they are not contained in more_freqs) above the frequency of the
largest peak in freqs for constructing a harmonic group.
max_double_use_harmonics (int): maximum harmonics for which double uses of peaks
are counted.
max_double_use_count (int): maximum number of harmonic groups a single peak can be part of.
max_fill_ratio (float): maximum allowed fraction of filled in frequencies.
power_n_harmonics (int): maximum number of harmonics over which the total power
of the signal is computed.
min_group_size (int): minimum required number of harmonics that are not filled in and
are not part of other, so far detected, harmonics groups.
max_harmonics (int): maximum number of harmonics to be returned for each group.
Returns:
group_list (list of 2-D numpy arrays): list of all harmonic groups found sorted
by fundamental frequency.
Each harmonic group is a 2-D numpy array with the first dimension the harmonics
and the second dimension containing frequency, height, and size of each harmonic.
If the power is zero, there was no corresponding peak in the power spectrum.
fzero_harmonics_list (list of int): the harmonics from which the fundamental frequencies were computed.
mains_list (2-d array): array of mains peaks found in all_freqs (frequency, height, size)
"""
if verbose > 0:
print('')
# set double use count to zero:
all_freqs[:, 4] = 0.0
freqtol = freq_tol_fac * deltaf
# remove power line harmonics from good_freqs:
# XXX might be improved!!!
if mains_freq > 0.0:
pfreqtol = 1.0 # 1 Hz tolerance
for inx in reversed(range(len(good_freqs))):
n = np.round(good_freqs[inx, 0] / mains_freq)
nd = np.abs(good_freqs[inx, 0] - n * mains_freq)
if nd <= pfreqtol:
if verbose > 1:
print('remove power line frequency', inx, good_freqs[inx, 0], np.abs(
good_freqs[inx, 0] - n * mains_freq))
good_freqs = np.delete(good_freqs, inx, axis=0)
group_list = list()
fzero_harmonics_list = list()
# as long as there are frequencies left in good_freqs:
while good_freqs.shape[0] > 0:
# we check for harmonic groups:
good_freqs, all_freqs, harm_group, fzero_harmonics, fmax = \
build_harmonic_group(good_freqs, all_freqs, deltaf,
verbose, min_freq, max_freq, freq_tol_fac,
max_divisor, max_upper_fill,
max_double_use_harmonics, max_double_use_count,
max_fill_ratio, power_n_harmonics)
if verbose > 1:
print('')
# nothing found:
if harm_group.shape[0] == 0:
if verbose > 0:
print('Nothing found for fmax=%.2fHz' % fmax)
continue
# count number of harmonics which have been detected, are not fill-ins,
# and are not doubly used:
group_size = np.sum((harm_group[:, 1] > 0.0) & (harm_group[:, 4] < 2.0))
group_size_ok = (group_size >= min_group_size)
# check frequency range of fundamental:
fundamental_ok = (harm_group[0, 0] >= min_freq and
harm_group[0, 0] <= max_freq)
# check power hum (does this really ever happen???):
mains_ok = ((mains_freq == 0.0) |
(np.abs(harm_group[0, 0] - mains_freq) > freqtol))
# check:
if group_size_ok and fundamental_ok and mains_ok:
if verbose > 0:
print('Accepting harmonic group: {:.2f}Hz p={:10.8f}'.format(
harm_group[0, 0], np.sum(harm_group[:, 1])))
group_list.append(harm_group[:, 0:2])
fzero_harmonics_list.append(fzero_harmonics)
else:
if verbose > 0:
print('Discarded harmonic group: {:.2f}Hz p={:10.8f} g={:d} f={:} m={:}'.format(
harm_group[0, 0], np.sum(harm_group[:, 1]),
group_size, fundamental_ok, mains_ok))
# do not save more than n harmonics:
if max_harmonics > 0:
for group in group_list:
if group.shape[0] > max_harmonics:
if verbose > 1:
print('Discarding some tailing harmonics for f=%.2fHz' % group[0, 0])
group = group[:max_harmonics, :]
# sort groups by fundamental frequency:
ffreqs = [f[0, 0] for f in group_list]
finx = np.argsort(ffreqs)
group_list = [group_list[fi] for fi in finx]
fzero_harmonics_list = [fzero_harmonics_list[fi] for fi in finx]
if verbose > 0:
print('')
if len(group_list) > 0:
print('## FUNDAMENTALS FOUND: ##')
for i in range(len(group_list)):
power = group_list[i][:, 1]
print('{:8.2f}Hz: {:10.8f} {:3d} {:3d}'.format(group_list[i][0, 0], np.sum(power),
np.sum(power <= 0.0), fzero_harmonics_list[i]))
else:
print('## NO FUNDAMENTALS FOUND ##')
# assemble mains frequencies from all_freqs:
mains_list = []
if mains_freq > 0.0:
pfreqtol = 1.0
for inx in range(len(all_freqs)):
n = np.round(all_freqs[inx, 0] / mains_freq)
nd = np.abs(all_freqs[inx, 0] - n * mains_freq)
if nd <= pfreqtol:
mains_list.append(all_freqs[inx, 0:2])
return group_list, fzero_harmonics_list, np.array(mains_list)
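# Illustration only: the mains-rejection test used in extract_fundamentals()
# above -- a peak is treated as a power-line harmonic if it lies within 1 Hz
# of an integer multiple of mains_freq.
def _is_mains_peak_sketch(f, mains_freq=60.0, pfreqtol=1.0):
    if mains_freq <= 0.0:
        return False
    n = np.round(f / mains_freq)
    return np.abs(f - n * mains_freq) <= pfreqtol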
def threshold_estimate(data, noise_factor=6.0, nbins=100, hist_height=1.0/ np.sqrt(np.e),
peak_factor=5.0):
"""Estimate noise standard deviation from histogram
    for useful peak-detection thresholds.
The standard deviation of the noise floor without peaks is estimated from
the width of the histogram of the data at hist_height relative height.
Args:
data: the data from which to estimate the thresholds
noise_factor (float): factor by which the width of the histogram is multiplied to set the low_threshold.
nbins (int or list of floats): number of bins or the bins for computing the histogram.
hist_height (float): height between 0 and 1 at which the width of the histogram is computed.
peak_factor (float): the high_threshold is the low_threshold plus
            this fraction times the distance between largest peaks
and low_threshold plus half the low_threshold
Returns:
low_threshold (float): the threshold just above the noise floor
high_threshold (float): the threshold for clear peaks
center: (float): estimate of the median of the data without peaks
"""
# estimate noise standard deviation:
hist, bins = np.histogram(data, nbins, density=True)
inx = hist > np.max(hist) * hist_height
lower = bins[0:-1][inx][0]
upper = bins[1:][inx][-1] # needs to return the next bin
center = 0.5 * (lower + upper)
noise_std = 0.5 * (upper - lower)
lowthreshold = noise_std * noise_factor
# high threshold:
lowerth = center + 0.5 * lowthreshold
cumhist = np.cumsum(hist) / np.sum(hist)
upperpthresh = 0.95
if bins[-2] >= lowerth:
pthresh = cumhist[bins[:-1] >= lowerth][0]
upperpthresh = pthresh + 0.95 * (1.0 - pthresh)
upperbins = bins[:-1][cumhist > upperpthresh]
if len(upperbins) > 0:
upperth = upperbins[0]
else:
upperth = bins[-1]
highthreshold = lowthreshold + peak_factor * noise_std
if upperth > lowerth + 0.1 * noise_std:
highthreshold = lowerth + peak_factor * (upperth - lowerth) + 0.5 * lowthreshold - center
return lowthreshold, highthreshold, center
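# Illustration only: threshold_estimate() applied to synthetic decibel data --
# a Gaussian noise floor with a few strong peaks mixed in. The numbers are
# arbitrary and only meant to show the call.
def _threshold_estimate_demo():
    rng = np.random.RandomState(1)
    data = rng.randn(10000)      # noise floor with standard deviation 1
    data[::500] += 30.0          # a handful of large peaks
    return threshold_estimate(data)  # (low_threshold, high_threshold, center)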
def harmonic_groups(psd_freqs, psd, verbose=0, low_threshold=0.0, high_threshold=0.0,
thresh_bins=100, noise_fac=6.0, peak_fac=0.5,
max_peak_width_fac=3.5, min_peak_width=1.0,
freq_tol_fac=0.7, mains_freq=60.0, min_freq=0.0, max_freq=2000.0,
max_work_freq=4000.0, max_divisor=4, max_upper_fill=1,
max_double_use_harmonics=8, max_double_use_count=1,
max_fill_ratio=0.25, power_n_harmonics=10,
min_group_size=3, max_harmonics=0, **kwargs):
"""Detect peaks in power spectrum and extract fundamentals of harmonic groups.
Args:
psd_freqs (array): frequencies of the power spectrum
        psd (array): power spectrum (linear, not decibel)
verbose (int): verbosity level
low_threshold (float): the relative threshold for detecting all peaks
in the decibel spectrum.
high_threshold (float): the relative threshold for detecting good peaks
in the decibel spectrum
thresh_bins (int or list of floats): number of bins or the bins for computing the histogram
from which the standard deviation of the noise level in the psd is estimated.
        noise_fac (float): multiplies the estimate of the standard deviation
            of the noise to result in the low_threshold
        peak_fac (float): the high_threshold is the low_threshold plus
            this fraction times the distance between largest peaks
            and low_threshold plus half the low_threshold
max_peak_width_fac (float): the maximum allowed width of a good peak
in the decibel power spectrum in multiples of
the frequency resolution.
min_peak_width (float): the minimum absolute value for the maximum width
of a peak in Hertz.
freq_tol_fac (float): harmonics need to fall within deltaf*freq_tol_fac
mains_freq (float): frequency of the mains power supply.
min_freq (float): minimum frequency accepted as a fundamental frequency
max_freq (float): maximum frequency accepted as a fundamental frequency
max_work_freq (float): maximum frequency to be used for strong ("good") peaks
max_divisor (int): maximum divisor used for checking for sub-harmonics
max_upper_fill (int): maximum number of frequencies that are allowed to be filled in
(i.e. they are not contained in more_freqs) above the frequency of the
largest peak in freqs for constructing a harmonic group.
max_double_use_harmonics (int): maximum harmonics for which double uses of peaks
are counted.
max_double_use_count (int): maximum number of harmonic groups a single peak can be part of.
max_fill_ratio (float): maximum allowed fraction of filled in frequencies.
power_n_harmonics (int): maximum number of harmonics over which the total power
of the signal is computed.
min_group_size (int): minimum required number of harmonics that are not filled in and
are not part of other, so far detected, harmonics groups.
max_harmonics (int): maximum number of harmonics to be returned for each group.
Returns:
group_list (list of 2-D numpy arrays): list of all extracted harmonic groups, sorted
by fundamental frequency.
Each harmonic group is a 2-D numpy array with the first dimension the harmonics
and the second dimension containing frequency, height, and size of each harmonic.
If the power is zero, there was no corresponding peak in the power spectrum.
fzero_harmonics (list of ints) : The harmonics from
which the fundamental frequencies were computed.
mains (2-d array): frequencies and power of multiples of the mains frequency found in the power spectrum.
all_freqs (2-d array): peaks in the power spectrum detected with low threshold
[frequency, power, size, width, double use count].
good_freqs (1-d array): frequencies of peaks detected with high threshold.
low_threshold (float): the relative threshold for detecting all peaks in the decibel spectrum.
high_threshold (float): the relative threshold for detecting good peaks in the decibel spectrum.
center (float): the baseline level of the power spectrum.
"""
if verbose > 0:
print('')
print(70 * '#')
print('##### harmonic_groups', 48 * '#')
# decibel power spectrum:
log_psd = decibel(psd)
# thresholds:
center = np.NaN
if low_threshold <= 0.0 or high_threshold <= 0.0:
n = len(log_psd)
low_threshold, high_threshold, center = threshold_estimate(log_psd[2 * n // 3:n * 9 // 10],
noise_fac, thresh_bins,
peak_factor=peak_fac)
if verbose > 1:
print('')
print('low_threshold=', low_threshold, center + low_threshold)
print('high_threshold=', high_threshold, center + high_threshold)
print('center=', center)
# detect peaks in decibel power spectrum:
all_freqs, _ = detect_peaks(log_psd, low_threshold, psd_freqs,
accept_peaks_size_width)
if len(all_freqs) == 0:
        # TODO: Why was no peak detected?
return [], [], [], np.zeros((0, 5)), [], low_threshold, high_threshold, center
# select good peaks:
wthresh = max_peak_width_fac * (psd_freqs[1] - psd_freqs[0])
if wthresh < min_peak_width:
wthresh = min_peak_width
freqs = all_freqs[(all_freqs[:, 2] > high_threshold) &
(all_freqs[:, 0] >= min_freq) &
(all_freqs[:, 0] <= max_work_freq) &
(all_freqs[:, 3] < wthresh), :]
# convert peak sizes back to power:
freqs[:, 1] = 10.0 ** (0.1 * freqs[:, 1])
all_freqs[:, 1] = 10.0 ** (0.1 * all_freqs[:, 1])
# detect harmonic groups:
groups, fzero_harmonics, mains = extract_fundamentals(freqs, all_freqs,
psd_freqs[1] - psd_freqs[0],
verbose, freq_tol_fac,
mains_freq, min_freq, max_freq,
max_divisor, max_upper_fill,
max_double_use_harmonics,
max_double_use_count,max_fill_ratio,
power_n_harmonics, min_group_size,
max_harmonics)
return groups, fzero_harmonics, mains, all_freqs, freqs[:, 0], low_threshold, high_threshold, center
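# Illustration only: a minimal end-to-end call of harmonic_groups() on a
# synthetic signal (fundamental at 300 Hz plus two harmonics), using
# scipy.signal.welch for the power spectrum. All parameter choices here are
# arbitrary assumptions, not recommended settings.
def _harmonic_groups_demo():
    from scipy.signal import welch
    rate = 20000.0
    t = np.arange(0.0, 2.0, 1.0 / rate)
    x = (np.sin(2.0 * np.pi * 300.0 * t) +
         0.5 * np.sin(2.0 * np.pi * 600.0 * t) +
         0.25 * np.sin(2.0 * np.pi * 900.0 * t))
    psd_freqs, psd = welch(x, fs=rate, nperseg=2 ** 14)
    groups = harmonic_groups(psd_freqs, psd)[0]
    return [group[0, 0] for group in groups]  # detected fundamental frequencies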
def fundamental_freqs(group_list):
"""
Extract the fundamental frequencies from lists of harmonic groups.
Args:
group_list (list of 2-D arrays or list of list of 2-D arrays):
Lists of harmonic groups as returned by extract_fundamentals() and
harmonic_groups() with the element [0][0] of the
harmonic groups being the fundamental frequency.
Returns:
fundamentals (1-D array or list of 1-D array):
single array or list of arrays (corresponding to the input group_list)
of the fundamental frequencies.
"""
if len(group_list) == 0:
fundamentals = np.array([])
elif hasattr(group_list[0][0][0], '__len__'):
fundamentals = []
for groups in group_list:
fundamentals.append(np.array([harmonic_group[0][0] for harmonic_group in groups]))
else:
fundamentals = np.array([harmonic_group[0][0] for harmonic_group in group_list])
return fundamentals
def fundamental_freqs_and_db(group_list):
"""
Extract the fundamental frequencies and their power in dB from lists of harmonic groups.
Parameters
----------
group_list: list of 2-D arrays or list of list of 2-D arrays
Lists of harmonic groups as returned by extract_fundamentals() and
harmonic_groups() with the element [0][0] of the harmonic groups being the fundamental frequency,
and element[0][1] being the corresponding power.
Returns
-------
eodf_db_matrix: 2-D array or list of 2-D arrays
Matrix with fundamental frequencies in first column and corresponding power in dB in second column.
"""
if len(group_list) == 0:
eodf_db_matrix = np.array([])
elif hasattr(group_list[0][0][0], '__len__'):
eodf_db_matrix = []
for groups in group_list:
f = [np.array([harmonic_group[0][0], harmonic_group[0][1]]) for harmonic_group in group_list]
f[:, 1] = decibel(f[:, 1]) # calculate decibel using 1 as reference power
eodf_db_matrix.append(f)
else:
eodf_db_matrix = np.array([np.array([harmonic_group[0][0], harmonic_group[0][1]])
for harmonic_group in group_list])
eodf_db_matrix[:, 1] = decibel(eodf_db_matrix[:, 1]) # calculate decibel using 1 as reference power
return eodf_db_matrix
def colors_markers():
"""
Generate a list of colors and markers for plotting.
Returns:
--------
colors: list
list of colors
markers: list
list of markers
"""
# color and marker range:
colors = []
markers = []
mr2 = []
# first color range:
    cc0 = plt.cm.gist_rainbow(np.linspace(0.0, 1.0, 8))
# shuffle it:
for k in range((len(cc0) + 1) // 2):
colors.extend(cc0[k::(len(cc0) + 1) // 2])
markers.extend(len(cc0) * 'o')
mr2.extend(len(cc0) * 'v')
# second darker color range:
    cc1 = plt.cm.gist_rainbow(np.linspace(0.33 / 7.0, 1.0, 7))
cc1 = mc.hsv_to_rgb(mc.rgb_to_hsv(np.array([cc1[:, :3]])) * np.array([1.0, 0.9, 0.7]))[0]
cc1 = np.hstack((cc1, np.ones((len(cc1),1))))
# shuffle it:
for k in range((len(cc1) + 1) // 2):
colors.extend(cc1[k::(len(cc1) + 1) // 2])
markers.extend(len(cc1) * '^')
mr2.extend(len(cc1) * '*')
# third lighter color range:
    cc2 = plt.cm.gist_rainbow(np.linspace(0.67 / 6.0, 1.0, 6))
    cc2 = mc.hsv_to_rgb(mc.rgb_to_hsv(np.array([cc2[:, :3]])) * np.array([1.0, 0.5, 1.0]))[0]
cc2 = np.hstack((cc2, np.ones((len(cc2),1))))
# shuffle it:
for k in range((len(cc2) + 1) // 2):
colors.extend(cc2[k::(len(cc2) + 1) // 2])
markers.extend(len(cc2) * 'D')
mr2.extend(len(cc2) * 'x')
markers.extend(mr2)
return colors, markers
def plot_harmonic_groups(ax, group_list, max_groups=0, sort_by_freq=True,
colors=None, markers=None, legend_rows=8, **kwargs):
"""
Mark decibel power of fundamentals and their harmonics in a plot.
Args:
-----
ax: axis for plot
Axis used for plotting.
group_list: list of 2-D arrays
Lists of harmonic groups as returned by extract_fundamentals() and
harmonic_groups() with the element [0, 0] of the harmonic groups being the fundamental frequency,
and element[0, 1] being the corresponding power.
max_groups: int
If not zero plot only the max_groups powerful groups.
sort_by_freq: boolean
If True sort legend by frequency, otherwise by power.
colors: list of colors or None
If not None list of colors for plotting each group
markers: list of markers or None
If not None list of markers for plotting each group
legend_rows: int
Maximum number of rows to be used for the legend.
kwargs:
Key word arguments for the legend of the plot.
"""
if len(group_list) == 0:
return
# sort by power:
power = np.array([np.sum(fish[:10, 1]) for fish in group_list])
max_power = np.max(power)
idx_maxpower = np.argsort(power)
    if max_groups > 0 and len(idx_maxpower) > max_groups:
idx_maxpower = idx_maxpower[-max_groups:]
idx = np.array(list(reversed(idx_maxpower)))
# sort by frequency:
if sort_by_freq:
freqs = [group_list[group][0, 0] for group in idx]
idx = idx[np.argsort(freqs)]
# plot:
for k, i in enumerate(idx):
group = group_list[i]
x = np.array([harmonic[0] for harmonic in group])
y = np.array([harmonic[1] for harmonic in group])
msize = 7.0 + 10.0 * (power[i] / max_power) ** 0.25
color_kwargs = {}
if colors is not None:
color_kwargs = {'color': colors[k%len(colors)]}
if markers is None:
ax.plot(x, decibel(y), 'o', ms=msize, label='%.1f Hz' % group[0, 0], **color_kwargs)
else:
if k >= len(markers):
break
ax.plot(x, decibel(y), linestyle='None', marker=markers[k], mec=None, mew=0.0,
ms=msize, label='%.1f Hz' % group[0, 0], **color_kwargs)
# legend:
if legend_rows > 0:
ncol = (len(idx)-1) // legend_rows + 1
ax.legend(numpoints=1, ncol=ncol, **kwargs)
else:
ax.legend(numpoints=1, **kwargs)
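# Illustrative usage sketch (added for documentation, not part of the original
# module): plotting invented harmonic groups into a fresh matplotlib axis with
# the color and marker lists generated above.
def _example_plot_harmonic_groups():
    groups = [np.array([[500.0, 1.0], [1000.0, 0.5], [1500.0, 0.2]]),
              np.array([[720.0, 2.0], [1440.0, 0.7]])]
    fig, ax = plt.subplots()
    colors, markers = colors_markers()
    plot_harmonic_groups(ax, groups, colors=colors, markers=markers,
                         loc='upper right')
    return fig, ax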
def plot_psd_harmonic_groups(ax, psd_freqs, psd, group_list, mains=None, all_freqs=None, good_freqs=None,
max_freq=2000.0):
"""
Plot decibel power-spectrum with detected peaks, harmonic groups, and mains frequencies.
Parameters:
-----------
    ax: axis for plot
        Axis used for plotting.
    psd_freqs: array
        Frequencies of the power spectrum.
    psd: array
        Power spectrum (linear, not decibel).
group_list: list of 2-D arrays
Lists of harmonic groups as returned by extract_fundamentals() and
harmonic_groups() with the element [0, 0] of the harmonic groups being the fundamental frequency,
and element[0, 1] being the corresponding power.
mains: 2-D array
Frequencies and power of multiples of the mains frequency found in the power spectrum.
all_freqs: 2-D array
Peaks in the power spectrum detected with low threshold.
good_freqs: 1-D array
Frequencies of peaks detected with high threshold.
max_freq: float
Limits of frequency axis are set to (0, max_freq) if max_freq is greater than zero.
"""
# mark all and good psd peaks:
pmin, pmax = ax.get_ylim()
doty = pmax - 5.0
if all_freqs is not None:
ax.plot(all_freqs[:, 0], np.zeros(len(all_freqs[:, 0])) + doty, 'o', color='#ffffff')
if good_freqs is not None:
ax.plot(good_freqs, np.zeros(len(good_freqs)) + doty, 'o', color='#888888')
# mark mains frequencies:
if mains is not None and len(mains) > 0:
fpeaks = mains[:, 0]
        fpeakinx = [int(np.round(fp/(psd_freqs[1]-psd_freqs[0]))) for fp in fpeaks if fp < psd_freqs[-1]]
ax.plot(fpeaks[:len(fpeakinx)], decibel(psd[fpeakinx]), linestyle='None',
marker='.', color='k', ms=10, mec=None, mew=0.0,
label='%3.0f Hz mains' % mains[0, 0])
# mark harmonic groups:
colors, markers = colors_markers()
plot_harmonic_groups(ax, group_list, max_groups=0, sort_by_freq=True,
colors=colors, markers=markers, legend_rows=8,
loc='upper right')
# plot power spectrum:
plot_decibel_psd(ax, psd_freqs, psd, max_freq=max_freq, color='blue')
def add_psd_peak_detection_config(cfg, low_threshold=0.0, high_threshold=0.0,
thresh_bins=100, noise_fac=6.0, peak_fac=0.5,
max_peak_width_fac=3.5, min_peak_width=1.0):
""" Add parameter needed for detection of peaks in power spectrum used by
harmonic_groups() as a new section to a configuration.
Args:
cfg (ConfigFile): the configuration
"""
cfg.add_section('Thresholds for peak detection in power spectra:')
    cfg.add('lowThreshold', low_threshold, 'dB', 'Threshold for all peaks. If 0.0 estimate threshold from histogram.')
cfg.add('highThreshold', high_threshold, 'dB', 'Threshold for good peaks. If 0.0 estimate threshold from histogram.')
# cfg['lowThreshold'][0] = 12. # panama
# cfg['highThreshold'][0] = 18. # panama
cfg.add_section('Threshold estimation:\nIf no thresholds are specified they are estimated from the histogram of the decibel power spectrum.')
cfg.add('thresholdBins', thresh_bins, '', 'Number of bins used to compute the histogram used for threshold estimation.')
cfg.add('noiseFactor', noise_fac, '', 'Factor for multiplying std of noise floor for lower threshold.')
cfg.add('peakFactor', peak_fac, '', 'Fractional position of upper threshold above lower threshold.')
cfg.add_section('Peak detection in decibel power spectrum:')
cfg.add('maxPeakWidthFac', max_peak_width_fac, '',
            'Maximum width of peaks at 0.75 height in multiples of frequency resolution (might be increased).')
cfg.add('minPeakWidth', min_peak_width, 'Hz', 'Peaks do not need to be narrower than this.')
def psd_peak_detection_args(cfg):
""" Translates a configuration to the respective parameter names for the
detection of peaks in power spectrum used by harmonic_groups().
The return value can then be passed as key-word arguments to this function.
Args:
cfg (ConfigFile): the configuration
Returns:
a (dict): dictionary with names of arguments of the harmonic-group()
function and their values as supplied by cfg.
"""
return cfg.map({'low_threshold': 'lowThreshold',
'high_threshold': 'highThreshold',
'thresh_bins': 'thresholdBins',
'noise_fac': 'noiseFactor',
'peak_fac': 'peakFactor',
'max_peak_width_fac': 'maxPeakWidthFac',
'min_peak_width': 'minPeakWidth'})
def add_harmonic_groups_config(cfg, mains_freq=60.0, max_divisor=4, freq_tol_fac=0.7,
max_upper_fill=1, max_fill_ratio=0.25,
max_double_use_harmonics=8, max_double_use_count=1,
power_n_harmonics=10, min_group_size=3,
min_freq=20.0, max_freq=2000.0, max_work_freq=4000.0,
max_harmonics=0):
""" Add parameter needed for detection of harmonic groups as
a new section to a configuration.
Args:
cfg (ConfigFile): the configuration
"""
cfg.add_section('Harmonic groups:')
cfg.add('mainsFreq', mains_freq, 'Hz', 'Mains frequency to be excluded.')
    cfg.add('maxDivisor', max_divisor, '', 'Maximum ratio between the frequency of the largest peak and its fundamental.')
    cfg.add('freqTolerance', freq_tol_fac, '',
            'Harmonics need to be within this factor times the frequency resolution of the power spectrum. Needs to be higher than 0.5!')
    cfg.add('maxUpperFill', max_upper_fill, '',
            'As soon as more than this number of harmonics need to be filled in consecutively stop searching for higher harmonics.')
    cfg.add('maxFillRatio', max_fill_ratio, '',
            'Maximum fraction of filled-in harmonics allowed (useful values are smaller than 0.5).')
cfg.add('maxDoubleUseHarmonics', max_double_use_harmonics, '', 'Maximum harmonics up to which double uses are penalized.')
cfg.add('maxDoubleUseCount', max_double_use_count, '', 'Maximum overall double use count allowed.')
cfg.add('powerNHarmonics', power_n_harmonics, '', 'Compute total power over the first # harmonics.')
cfg.add_section('Acceptance of best harmonic groups:')
cfg.add('minimumGroupSize', min_group_size, '',
            'Minimum required number of harmonics (including the fundamental) that are not filled in and are not used by other groups.')
# cfg['minimumGroupSize'][0] = 2 # panama
cfg.add('minimumFrequency', min_freq, 'Hz', 'Minimum frequency allowed for the fundamental.')
cfg.add('maximumFrequency', max_freq, 'Hz', 'Maximum frequency allowed for the fundamental.')
cfg.add('maximumWorkingFrequency', max_work_freq, 'Hz',
'Maximum frequency to be used to search for harmonic groups and to adjust fundamental frequency.')
cfg.add('maxHarmonics', max_harmonics, '', '0: keep all, >0 only keep the first # harmonics.')
def harmonic_groups_args(cfg):
""" Translates a configuration to the
respective parameter names of the harmonic-group detection functions.
The return value can then be passed as key-word arguments to this function.
Args:
cfg (ConfigFile): the configuration
Returns:
a (dict): dictionary with names of arguments of the harmonic-group detection
functions and their values as supplied by cfg.
"""
return cfg.map({'mains_freq': 'mainsFreq',
'max_divisor': 'maxDivisor',
'freq_tol_fac': 'freqTolerance',
'max_upper_fill': 'maxUpperFill',
'max_fill_ratio': 'maxFillRatio',
'max_double_use_harmonics': 'maxDoubleUseHarmonics',
'max_double_use_count': 'maxDoubleUseCount',
'power_n_harmonics': 'powerNHarmonics',
'min_group_size': 'minimumGroupSize',
'min_freq': 'minimumFrequency',
'max_freq': 'maximumFrequency',
'max_work_freq': 'maximumWorkingFrequency',
'max_harmonics': 'maxHarmonics'})
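# Illustrative usage sketch (added for documentation, not part of the original
# module): how the configuration helpers above are typically chained together.
# `cfg` is assumed to be a ConfigFile instance; passing the merged keyword
# arguments on to harmonic_groups() follows the docstrings above and is a
# plausible usage pattern, not the verbatim original.
def _example_config_usage(cfg, psd_freqs, psd_power):
    add_psd_peak_detection_config(cfg)
    add_harmonic_groups_config(cfg)
    kwargs = psd_peak_detection_args(cfg)
    kwargs.update(harmonic_groups_args(cfg))
    return harmonic_groups(psd_freqs, psd_power, **kwargs)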
if __name__ == "__main__":
print("Checking harmonicgroups module ...")
from .fakefish import generate_wavefish
from .powerspectrum import psd
# generate data:
samplerate = 44100.0
eodfs = [123.0, 321.0, 666.0, 668.0]
fish1 = generate_wavefish(eodfs[0], samplerate, duration=8.0, noise_std=0.01,
amplitudes=[1.0, 0.5, 0.2, 0.1, 0.05], phases=[0.0, 0.0, 0.0, 0.0, 0.0])
fish2 = generate_wavefish(eodfs[1], samplerate, duration=8.0, noise_std=0.01,
amplitudes=[1.0, 0.7, 0.2, 0.1], phases=[0.0, 0.0, 0.0, 0.0])
fish3 = generate_wavefish(eodfs[2], samplerate, duration=8.0, noise_std=0.01,
amplitudes=[10.0, 5.0, 1.0], phases=[0.0, 0.0, 0.0])
fish4 = generate_wavefish(eodfs[3], samplerate, duration=8.0, noise_std=0.01,
amplitudes=[6.0, 3.0, 1.0], phases=[0.0, 0.0, 0.0])
data = fish1 + fish2 + fish3 + fish4
# analyse:
psd_data = psd(data, samplerate, fresolution=0.5)
groups, _, mains, all_freqs, good_freqs, _, _, _ = harmonic_groups(psd_data[1], psd_data[0])
fundamentals = fundamental_freqs(groups)
print(fundamentals)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plot_psd_harmonic_groups(ax, psd_data[1], psd_data[0], groups, mains, all_freqs, good_freqs,
max_freq=3000.0)
plt.show()
|
gpl-3.0
|
mjudsp/Tsallis
|
sklearn/linear_model/logistic.py
|
5
|
68252
|
"""
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.extmath import row_norms
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
Returns
-------
    w : ndarray, shape (n_features,)
        Coefficient vector without the intercept weight (w[-1]) if the
        intercept should be fit. Unchanged otherwise.
    c : float
        The intercept.
    yz : ndarray, shape (n_samples,)
        y * (np.dot(X, w) + c).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
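# Illustrative sketch (added for documentation, not part of scikit-learn):
# what _intercept_dot returns for a tiny dense problem when the coefficient
# vector carries the intercept as its last entry. Numbers are made up.
def _example_intercept_dot():
    X = np.array([[1.0, 2.0], [3.0, 4.0]])
    y = np.array([1.0, -1.0])
    w = np.array([0.5, -0.25, 0.1])  # last entry is interpreted as the intercept
    w_out, c, yz = _intercept_dot(w, X, y)
    # w_out == [0.5, -0.25], c == 0.1, and yz == y * (X.dot(w_out) + c)
    return w_out, c, yz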
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
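# Illustrative sketch (added for documentation, not part of scikit-learn):
# verifying the analytical gradient of _logistic_loss_and_grad against finite
# differences with scipy.optimize.check_grad. Random data, for demonstration.
def _example_check_logistic_grad():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.sign(rng.randn(20))
    w = rng.randn(4)  # three coefficients plus an intercept
    err = optimize.check_grad(
        lambda w_: _logistic_loss_and_grad(w_, X, y, 1.0)[0],
        lambda w_: _logistic_loss_and_grad(w_, X, y, 1.0)[1],
        w)
    return err  # should be a small number if loss and gradient are consistent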
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
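# Illustrative sketch (added for documentation, not part of scikit-learn):
# using the Hs callable returned by _logistic_grad_hess to compute a
# Hessian-vector product without ever forming the full Hessian. Random data.
def _example_hessian_vector_product():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.sign(rng.randn(20))
    w = rng.randn(3)  # no intercept term in this sketch
    grad, Hs = _logistic_grad_hess(w, X, y, alpha=1.0)
    s = rng.randn(3)
    return Hs(s)  # the product of the regularized logistic Hessian with s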
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
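# Illustrative sketch (added for documentation, not part of scikit-learn):
# the class probabilities returned by _multinomial_loss form a proper
# distribution, i.e. every row of p sums to one. Random one-hot labels.
def _example_multinomial_loss():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 4)
    Y = np.eye(3)[rng.randint(0, 3, size=10)]  # one-hot labels for 3 classes
    w = rng.randn(3 * 4)                       # no intercept terms
    loss, p, w_mat = _multinomial_loss(w, X, Y, 1.0, np.ones(10))
    # p.shape == (10, 3) and np.allclose(p.sum(axis=1), 1.0)
    return loss, p, w_mat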
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
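# Illustrative sketch (added for documentation, not part of scikit-learn):
# multiplying the multinomial Hessian with a vector via the hessp callable,
# again without forming the Hessian explicitly. Random data, no intercepts.
def _example_multinomial_hessp():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 4)
    Y = np.eye(3)[rng.randint(0, 3, size=10)]
    w = rng.randn(3 * 4)
    grad, hessp = _multinomial_grad_hess(w, X, Y, 1.0, np.ones(10))
    v = rng.randn(3 * 4)
    return hessp(v)  # ravelled Hessian-vector product with the same size as w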
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set "
"class_weight='balanced'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
# 'auto' is deprecated and will be removed in 0.19
if class_weight in ("auto", "balanced"):
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver != 'sag':
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F')
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
except:
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
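# Illustrative sketch (added for documentation, not part of scikit-learn):
# fitting a small binary problem along a grid of three regularization values.
# Random data, for demonstration only.
def _example_logistic_regression_path():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)
    y = (X[:, 0] + 0.5 * rng.randn(50) > 0).astype(np.float64)
    coefs, Cs, n_iter = logistic_regression_path(X, y, Cs=[0.1, 1.0, 10.0],
                                                 fit_intercept=True,
                                                 solver='lbfgs')
    # len(coefs) == 3; each entry has n_features + 1 elements because
    # fit_intercept=True appends the intercept as the last element.
    return coefs, Cs, n_iter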
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is as an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solver.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle
both dense and sparse input. Use C-ordered arrays or CSR matrices
containing 64-bit floats for optimal performance; any other input format
will be converted (and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation. The 'liblinear' solver supports both L1 and L2
regularization, with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2', default: 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool, default: False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, default: 1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'* instead of deprecated
*class_weight='auto'*.
max_iter : int, default: 100
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, default: None
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}, default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, default: 1e-4
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
verbose : int, default: 0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default: False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers.
n_jobs : int, default: 1
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
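# Illustrative sketch (added for documentation, not part of scikit-learn):
# basic use of the estimator defined above on a toy binary problem.
def _example_logistic_regression():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)
    y = (X[:, 0] - X[:, 1] > 0).astype(int)
    clf = LogisticRegression(C=1.0, solver='lbfgs').fit(X, y)
    proba = clf.predict_proba(X[:5])   # shape (5, 2); rows sum to one
    labels = clf.predict(X[:5])        # predicted class labels (0 or 1)
    return proba, labels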
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
    This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path i.e guess the initial coefficients of the
present fit to be the coefficients got after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
strength. If Cs is as an int, then a grid of Cs values are chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and as values the path of coefficients
obtained during cross-validation across each fold and then across each
C, after doing an OvR for the corresponding class.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and as values the grid of scores
obtained during cross-validation of each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial', then the same scores are repeated across
all classes, since the multinomial loss yields a single score.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
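Examples
--------
A minimal, illustrative sketch: the data below is random, so fitted values
are not shown and only the API described above is demonstrated. ::
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegressionCV
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(100, 4)
>>> y = (X[:, 0] + X[:, 1] > 0).astype(int)
>>> clf = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs').fit(X, y)
>>> clf.C_.shape  # one C selected for the single binary decision
(1,)
>>> clf.scores_[1].shape  # (n_folds, len(Cs_)) grid for the positive class
(3, 5)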
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
check_classification_targets(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
# 'auto' is deprecated and will be removed in 0.19
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
# compute the class weights for the entire dataset y
if self.class_weight in ("auto", "balanced"):
classes = np.unique(y)
class_weight = compute_class_weight(self.class_weight, classes, y)
class_weight = dict(zip(classes, class_weight))
else:
class_weight = self.class_weight
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
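# fold_coefs_ is a flat list with one entry per (label, fold) pair; each
# entry is the 4-tuple returned by _log_reg_scoring_path:
# (coefs_path, Cs, scores, n_iter). The unpacking below regroups these
# per class and per fold.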
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
# coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
|
bsd-3-clause
|
terkkila/scikit-learn
|
examples/datasets/plot_iris_dataset.py
|
283
|
1928
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions,
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
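# Optional addition (not part of the original example): report how much of
# the variance the three plotted PCA directions capture. The exact figures
# depend on the scikit-learn version, so they are printed rather than
# hard-coded here.
pca = PCA(n_components=3).fit(iris.data)
print("Explained variance ratio of the first three components: %s"
      % pca.explained_variance_ratio_)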
|
bsd-3-clause
|
Vvucinic/Wander
|
venv_2_7/lib/python2.7/site-packages/pandas/tseries/tests/test_offsets.py
|
9
|
182965
|
import os
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from pandas.compat import range, iteritems
from pandas import compat
import nose
from nose.tools import assert_raises
import numpy as np
from pandas.core.datetools import (
bday, BDay, CDay, BQuarterEnd, BMonthEnd, BusinessHour,
CBMonthEnd, CBMonthBegin,
BYearEnd, MonthEnd, MonthBegin, BYearBegin, CustomBusinessDay,
QuarterBegin, BQuarterBegin, BMonthBegin, DateOffset, Week,
YearBegin, YearEnd, Hour, Minute, Second, Day, Micro, Milli, Nano, Easter,
WeekOfMonth, format, ole2datetime, QuarterEnd, to_datetime, normalize_date,
get_offset, get_offset_name, get_standard_freq)
from pandas import Series
from pandas.tseries.frequencies import _offset_map, get_freq_code, _get_freq_str
from pandas.tseries.index import _to_m8, DatetimeIndex, _daterange_cache, date_range
from pandas.tseries.tools import parse_time_string, DateParseError
import pandas.tseries.offsets as offsets
from pandas.io.pickle import read_pickle
from pandas.tslib import NaT, Timestamp, Timedelta
import pandas.tslib as tslib
from pandas.util.testing import assertRaisesRegexp
import pandas.util.testing as tm
from pandas.tseries.offsets import BusinessMonthEnd, CacheableOffset, \
LastWeekOfMonth, FY5253, FY5253Quarter, WeekDay
from pandas.tseries.holiday import USFederalHolidayCalendar
_multiprocess_can_split_ = True
def test_monthrange():
import calendar
for y in range(2000, 2013):
for m in range(1, 13):
assert tslib.monthrange(y, m) == calendar.monthrange(y, m)
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(ValueError, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s, errors='ignore') == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
def test_to_m8():
valb = datetime(2007, 10, 1)
valu = _to_m8(valb)
tm.assertIsInstance(valu, np.datetime64)
# assert valu == np.datetime64(datetime(2007,10,1))
# def test_datetime64_box():
# valu = np.datetime64(datetime(2007,10,1))
# valb = _dt_box(valu)
# assert type(valb) == datetime
# assert valb == datetime(2007,10,1)
#####
### DateOffset Tests
#####
class Base(tm.TestCase):
_offset = None
_offset_types = [getattr(offsets, o) for o in offsets.__all__]
timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']
@property
def offset_types(self):
return self._offset_types
def _get_offset(self, klass, value=1, normalize=False):
# create instance from offset class
if klass is FY5253 or klass is FY5253Quarter:
klass = klass(n=value, startingMonth=1, weekday=1,
qtr_with_extra_week=1, variation='last',
normalize=normalize)
elif klass is LastWeekOfMonth:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is WeekOfMonth:
klass = klass(n=value, week=1, weekday=5, normalize=normalize)
elif klass is Week:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is DateOffset:
klass = klass(days=value, normalize=normalize)
else:
try:
klass = klass(value, normalize=normalize)
except:
klass = klass(normalize=normalize)
return klass
def test_apply_out_of_range(self):
if self._offset is None:
return
# try to create an out-of-bounds result timestamp; if we can't create the offset
# skip
try:
if self._offset is BusinessHour:
# Using 10000 in BusinessHour fails in tz check because of DST difference
offset = self._get_offset(self._offset, value=100000)
else:
offset = self._get_offset(self._offset, value=10000)
result = Timestamp('20080101') + offset
self.assertIsInstance(result, datetime)
self.assertIsNone(result.tzinfo)
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
# Check tz is preserved
for tz in self.timezones:
t = Timestamp('20080101', tz=tz)
result = t + offset
self.assertIsInstance(result, datetime)
self.assertEqual(t.tzinfo, result.tzinfo)
except (tslib.OutOfBoundsDatetime):
raise
except (ValueError, KeyError) as e:
raise nose.SkipTest("cannot create out_of_range offset: {0} {1}".format(str(self).split('.')[-1],e))
class TestCommon(Base):
def setUp(self):
# expected values created by Base._get_offset
# are applied to 2011/01/01 09:00 (Saturday)
# used for .apply and .rollforward
self.expecteds = {'Day': Timestamp('2011-01-02 09:00:00'),
'DateOffset': Timestamp('2011-01-02 09:00:00'),
'BusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'CustomBusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthBegin': Timestamp('2011-02-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthEnd': Timestamp('2011-01-31 09:00:00'),
'BusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'YearBegin': Timestamp('2012-01-01 09:00:00'),
'BYearBegin': Timestamp('2011-01-03 09:00:00'),
'YearEnd': Timestamp('2011-12-31 09:00:00'),
'BYearEnd': Timestamp('2011-12-30 09:00:00'),
'QuarterBegin': Timestamp('2011-03-01 09:00:00'),
'BQuarterBegin': Timestamp('2011-03-01 09:00:00'),
'QuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BQuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BusinessHour': Timestamp('2011-01-03 10:00:00'),
'WeekOfMonth': Timestamp('2011-01-08 09:00:00'),
'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'),
'FY5253Quarter': Timestamp('2011-01-25 09:00:00'),
'FY5253': Timestamp('2011-01-25 09:00:00'),
'Week': Timestamp('2011-01-08 09:00:00'),
'Easter': Timestamp('2011-04-24 09:00:00'),
'Hour': Timestamp('2011-01-01 10:00:00'),
'Minute': Timestamp('2011-01-01 09:01:00'),
'Second': Timestamp('2011-01-01 09:00:01'),
'Milli': Timestamp('2011-01-01 09:00:00.001000'),
'Micro': Timestamp('2011-01-01 09:00:00.000001'),
'Nano': Timestamp(np.datetime64('2011-01-01T09:00:00.000000001Z'))}
def test_return_type(self):
for offset in self.offset_types:
offset = self._get_offset(offset)
# make sure that we are returning a Timestamp
result = Timestamp('20080101') + offset
self.assertIsInstance(result, Timestamp)
# make sure that we are returning NaT
self.assertTrue(NaT + offset is NaT)
self.assertTrue(offset + NaT is NaT)
self.assertTrue(NaT - offset is NaT)
self.assertTrue((-offset).apply(NaT) is NaT)
def test_offset_n(self):
for offset_klass in self.offset_types:
offset = self._get_offset(offset_klass)
self.assertEqual(offset.n, 1)
neg_offset = offset * -1
self.assertEqual(neg_offset.n, -1)
mul_offset = offset * 3
self.assertEqual(mul_offset.n, 3)
def test_offset_freqstr(self):
for offset_klass in self.offset_types:
offset = self._get_offset(offset_klass)
freqstr = offset.freqstr
if freqstr not in ('<Easter>', "<DateOffset: kwds={'days': 1}>",
'LWOM-SAT', ):
code = get_offset(freqstr)
self.assertEqual(offset.rule_code, code)
def _check_offsetfunc_works(self, offset, funcname, dt, expected,
normalize=False):
offset_s = self._get_offset(offset, normalize=normalize)
func = getattr(offset_s, funcname)
result = func(dt)
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected)
result = func(Timestamp(dt))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected)
# test nano second is preserved
result = func(Timestamp(dt) + Nano(5))
self.assertTrue(isinstance(result, Timestamp))
if normalize is False:
self.assertEqual(result, expected + Nano(5))
else:
self.assertEqual(result, expected)
if isinstance(dt, np.datetime64):
# test tz when input is datetime or Timestamp
return
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
tz_obj = tslib.maybe_get_tz(tz)
dt_tz = tslib._localize_pydatetime(dt, tz_obj)
result = func(dt_tz)
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected_localize)
result = func(Timestamp(dt, tz=tz))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected_localize)
# test nano second is preserved
result = func(Timestamp(dt, tz=tz) + Nano(5))
self.assertTrue(isinstance(result, Timestamp))
if normalize is False:
self.assertEqual(result, expected_localize + Nano(5))
else:
self.assertEqual(result, expected_localize)
def test_apply(self):
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np.datetime64('2011-01-01 09:00Z')
for offset in self.offset_types:
for dt in [sdt, ndt]:
expected = self.expecteds[offset.__name__]
self._check_offsetfunc_works(offset, 'apply', dt, expected)
expected = Timestamp(expected.date())
self._check_offsetfunc_works(offset, 'apply', dt, expected,
normalize=True)
def test_rollforward(self):
expecteds = self.expecteds.copy()
# result will not be changed if the target is on the offset
no_changes = ['Day', 'MonthBegin', 'YearBegin', 'Week', 'Hour', 'Minute',
'Second', 'Milli', 'Micro', 'Nano', 'DateOffset']
for n in no_changes:
expecteds[n] = Timestamp('2011/01/01 09:00')
expecteds['BusinessHour'] = Timestamp('2011-01-03 09:00:00')
# but will be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2011-01-02 00:00:00'),
'DateOffset': Timestamp('2011-01-02 00:00:00'),
'MonthBegin': Timestamp('2011-02-01 00:00:00'),
'YearBegin': Timestamp('2012-01-01 00:00:00'),
'Week': Timestamp('2011-01-08 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np.datetime64('2011-01-01 09:00Z')
for offset in self.offset_types:
for dt in [sdt, ndt]:
expected = expecteds[offset.__name__]
self._check_offsetfunc_works(offset, 'rollforward', dt, expected)
expected = norm_expected[offset.__name__]
self._check_offsetfunc_works(offset, 'rollforward', dt, expected,
normalize=True)
def test_rollback(self):
expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthEnd': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthBegin': Timestamp('2010-12-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2010-12-01 09:00:00'),
'MonthEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BYearBegin': Timestamp('2010-01-01 09:00:00'),
'YearEnd': Timestamp('2010-12-31 09:00:00'),
'BYearEnd': Timestamp('2010-12-31 09:00:00'),
'QuarterBegin': Timestamp('2010-12-01 09:00:00'),
'BQuarterBegin': Timestamp('2010-12-01 09:00:00'),
'QuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BQuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessHour': Timestamp('2010-12-31 17:00:00'),
'WeekOfMonth': Timestamp('2010-12-11 09:00:00'),
'LastWeekOfMonth': Timestamp('2010-12-25 09:00:00'),
'FY5253Quarter': Timestamp('2010-10-26 09:00:00'),
'FY5253': Timestamp('2010-01-26 09:00:00'),
'Easter': Timestamp('2010-04-04 09:00:00')}
# result will not be changed if the target is on the offset
for n in ['Day', 'MonthBegin', 'YearBegin', 'Week', 'Hour', 'Minute',
'Second', 'Milli', 'Micro', 'Nano', 'DateOffset']:
expecteds[n] = Timestamp('2011/01/01 09:00')
# but will be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2010-12-31 00:00:00'),
'DateOffset': Timestamp('2010-12-31 00:00:00'),
'MonthBegin': Timestamp('2010-12-01 00:00:00'),
'YearBegin': Timestamp('2010-01-01 00:00:00'),
'Week': Timestamp('2010-12-25 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np.datetime64('2011-01-01 09:00Z')
for offset in self.offset_types:
for dt in [sdt, ndt]:
expected = expecteds[offset.__name__]
self._check_offsetfunc_works(offset, 'rollback', dt, expected)
expected = norm_expected[offset.__name__]
self._check_offsetfunc_works(offset, 'rollback',
dt, expected, normalize=True)
def test_onOffset(self):
for offset in self.offset_types:
dt = self.expecteds[offset.__name__]
offset_s = self._get_offset(offset)
self.assertTrue(offset_s.onOffset(dt))
# when normalize=True, onOffset checks time is 00:00:00
offset_n = self._get_offset(offset, normalize=True)
self.assertFalse(offset_n.onOffset(dt))
if offset is BusinessHour:
# In default BusinessHour (9:00-17:00), normalized time
# cannot be in business hour range
continue
date = datetime(dt.year, dt.month, dt.day)
self.assertTrue(offset_n.onOffset(date))
def test_add(self):
dt = datetime(2011, 1, 1, 9, 0)
for offset in self.offset_types:
offset_s = self._get_offset(offset)
expected = self.expecteds[offset.__name__]
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected)
tm._skip_if_no_pytz()
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected_localize)
# normalize=True
offset_s = self._get_offset(offset, normalize=True)
expected = Timestamp(expected.date())
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected)
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected_localize)
def test_pickle_v0_15_2(self):
offsets = {'DateOffset': DateOffset(years=1),
'MonthBegin': MonthBegin(1),
'Day': Day(1),
'YearBegin': YearBegin(1),
'Week': Week(1)}
pickle_path = os.path.join(tm.get_data_path(),
'dateoffset_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
# with open(pickle_path, 'wb') as f: pickle.dump(offsets, f)
#
tm.assert_dict_equal(offsets, read_pickle(pickle_path))
class TestDateOffset(Base):
_multiprocess_can_split_ = True
def setUp(self):
self.d = Timestamp(datetime(2008, 1, 2))
_offset_map.clear()
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
def test_eq(self):
offset1 = DateOffset(days=1)
offset2 = DateOffset(days=365)
self.assertNotEqual(offset1, offset2)
class TestBusinessDay(Base):
_multiprocess_can_split_ = True
_offset = BDay
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
# equivalent in this special case
offset = BDay()
offset2 = BDay()
offset2.normalize = True
self.assertEqual(offset, offset2)
def test_repr(self):
self.assertEqual(repr(self.offset), '<BusinessDay>')
assert repr(self.offset2) == '<2 * BusinessDays>'
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10 * self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5 * BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(
BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(
BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = BDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 14))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 17))
offset = offsets.Day()
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 15))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 15))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2 * bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2 * bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + BDay(10)
self.assertEqual(result, datetime(2012, 11, 6))
result = dt + BDay(100) - BDay(100)
self.assertEqual(result, dt)
off = BDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
self.assertEqual(rs, xp)
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
self.assertEqual(rs, xp)
off = BDay() * 10
rs = datetime(2014, 1, 5) + off # see #5890
xp = datetime(2014, 1, 17)
self.assertEqual(rs, xp)
def test_apply_corner(self):
self.assertRaises(TypeError, BDay().apply, BMonthEnd())
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BDay()
offset2 = BDay()
self.assertFalse(offset1 != offset2)
class TestBusinessHour(Base):
_multiprocess_can_split_ = True
_offset = BusinessHour
def setUp(self):
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = BusinessHour()
self.offset2 = BusinessHour(n=3)
self.offset3 = BusinessHour(n=-1)
self.offset4 = BusinessHour(n=-4)
from datetime import time as dt_time
self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
self.offset6 = BusinessHour(start='20:00', end='05:00')
self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30), end=dt_time(6, 30))
def test_constructor_errors(self):
from datetime import time as dt_time
with tm.assertRaises(ValueError):
BusinessHour(start=dt_time(11, 0, 5))
with tm.assertRaises(ValueError):
BusinessHour(start='AAA')
with tm.assertRaises(ValueError):
BusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
# equivalent in this special case
offset = self._offset()
offset2 = self._offset()
offset2.normalize = True
self.assertEqual(offset, offset2)
def test_repr(self):
self.assertEqual(repr(self.offset1), '<BusinessHour: BH=09:00-17:00>')
self.assertEqual(repr(self.offset2), '<3 * BusinessHours: BH=09:00-17:00>')
self.assertEqual(repr(self.offset3), '<-1 * BusinessHour: BH=09:00-17:00>')
self.assertEqual(repr(self.offset4), '<-4 * BusinessHours: BH=09:00-17:00>')
self.assertEqual(repr(self.offset5), '<BusinessHour: BH=11:00-14:30>')
self.assertEqual(repr(self.offset6), '<BusinessHour: BH=20:00-05:00>')
self.assertEqual(repr(self.offset7), '<-2 * BusinessHours: BH=21:30-06:30>')
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
self.assertEqual(self.d + BusinessHour() * 3, expected)
self.assertEqual(self.d + BusinessHour(n=3), expected)
def testEQ(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
self.assertEqual(offset, offset)
self.assertNotEqual(BusinessHour(), BusinessHour(-1))
self.assertEqual(BusinessHour(start='09:00'), BusinessHour())
self.assertNotEqual(BusinessHour(start='09:00'), BusinessHour(start='09:01'))
self.assertNotEqual(BusinessHour(start='09:00', end='17:00'),
BusinessHour(start='17:00', end='09:01'))
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset1(self.d), datetime(2014, 7, 1, 11))
self.assertEqual(self.offset2(self.d), datetime(2014, 7, 1, 13))
self.assertEqual(self.offset3(self.d), datetime(2014, 6, 30, 17))
self.assertEqual(self.offset4(self.d), datetime(2014, 6, 30, 14))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + self._offset(-3))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 5 * self.offset1, self.d + self._offset(5))
def testMult2(self):
self.assertEqual(self.d + (-3 * self._offset(-2)),
self.d + self._offset(6))
def testRollback1(self):
self.assertEqual(self.offset1.rollback(self.d), self.d)
self.assertEqual(self.offset2.rollback(self.d), self.d)
self.assertEqual(self.offset3.rollback(self.d), self.d)
self.assertEqual(self.offset4.rollback(self.d), self.d)
self.assertEqual(self.offset5.rollback(self.d), datetime(2014, 6, 30, 14, 30))
self.assertEqual(self.offset6.rollback(self.d), datetime(2014, 7, 1, 5, 0))
self.assertEqual(self.offset7.rollback(self.d), datetime(2014, 7, 1, 6, 30))
d = datetime(2014, 7, 1, 0)
self.assertEqual(self.offset1.rollback(d), datetime(2014, 6, 30, 17))
self.assertEqual(self.offset2.rollback(d), datetime(2014, 6, 30, 17))
self.assertEqual(self.offset3.rollback(d), datetime(2014, 6, 30, 17))
self.assertEqual(self.offset4.rollback(d), datetime(2014, 6, 30, 17))
self.assertEqual(self.offset5.rollback(d), datetime(2014, 6, 30, 14, 30))
self.assertEqual(self.offset6.rollback(d), d)
self.assertEqual(self.offset7.rollback(d), d)
self.assertEqual(self._offset(5).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)),
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
self.assertEqual(self.offset1.rollforward(self.d), self.d)
self.assertEqual(self.offset2.rollforward(self.d), self.d)
self.assertEqual(self.offset3.rollforward(self.d), self.d)
self.assertEqual(self.offset4.rollforward(self.d), self.d)
self.assertEqual(self.offset5.rollforward(self.d), datetime(2014, 7, 1, 11, 0))
self.assertEqual(self.offset6.rollforward(self.d), datetime(2014, 7, 1, 20, 0))
self.assertEqual(self.offset7.rollforward(self.d), datetime(2014, 7, 1, 21, 30))
d = datetime(2014, 7, 1, 0)
self.assertEqual(self.offset1.rollforward(d), datetime(2014, 7, 1, 9))
self.assertEqual(self.offset2.rollforward(d), datetime(2014, 7, 1, 9))
self.assertEqual(self.offset3.rollforward(d), datetime(2014, 7, 1, 9))
self.assertEqual(self.offset4.rollforward(d), datetime(2014, 7, 1, 9))
self.assertEqual(self.offset5.rollforward(d), datetime(2014, 7, 1, 11))
self.assertEqual(self.offset6.rollforward(d), d)
self.assertEqual(self.offset7.rollforward(d), d)
self.assertEqual(self._offset(5).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)),
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
self.assertEqual(result, datetime(2014, 7, 4, 17))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2014, 7, 7, 9))
def test_normalize(self):
tests = []
tests.append((BusinessHour(normalize=True),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
tests.append((BusinessHour(-1, normalize=True),
{datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
tests.append((BusinessHour(1, normalize=True, start='17:00', end='04:00'),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
self.assertEqual(offset.apply(dt), expected)
def test_onOffset(self):
tests = []
tests.append((BusinessHour(),
{datetime(2014, 7, 1, 9): True,
datetime(2014, 7, 1, 8, 59): False,
datetime(2014, 7, 1, 8): False,
datetime(2014, 7, 1, 17): True,
datetime(2014, 7, 1, 17, 1): False,
datetime(2014, 7, 1, 18): False,
datetime(2014, 7, 5, 9): False,
datetime(2014, 7, 6, 12): False}))
tests.append((BusinessHour(start='10:00', end='15:00'),
{datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
tests.append((BusinessHour(start='19:00', end='05:00'),
{datetime(2014, 7, 1, 9, 0): False,
datetime(2014, 7, 1, 10, 0): False,
datetime(2014, 7, 1, 15): False,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12, 0): False,
datetime(2014, 7, 6, 12, 0): False,
datetime(2014, 7, 1, 19, 0): True,
datetime(2014, 7, 2, 0, 0): True,
datetime(2014, 7, 4, 23): True,
datetime(2014, 7, 5, 1): True,
datetime(2014, 7, 5, 5, 0): True,
datetime(2014, 7, 6, 23, 0): False,
datetime(2014, 7, 7, 3, 0): False}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
self.assertEqual(offset.onOffset(dt), expected)
def test_opening_time(self):
tests = []
# opening time should be affected only by the sign of n, not by the
# magnitude of n or by end
tests.append(([BusinessHour(), BusinessHour(n=2), BusinessHour(n=4),
BusinessHour(end='10:00'), BusinessHour(n=2, end='4:00'),
BusinessHour(n=4, end='15:00')],
{datetime(2014, 7, 1, 11): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)),
# if the timestamp falls exactly on the opening time, the next opening time is that timestamp itself
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9), datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 3, 9), datetime(2014, 7, 2, 9)),
# 2014-07-05 is saturday
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 8, 9), datetime(2014, 7, 7, 9))}))
tests.append(([BusinessHour(start='11:15'), BusinessHour(n=2, start='11:15'),
BusinessHour(n=3, start='11:15'),
BusinessHour(start='11:15', end='10:00'),
BusinessHour(n=2, start='11:15', end='4:00'),
BusinessHour(n=3, start='11:15', end='15:00')],
{datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 11, 15), datetime(2014, 6, 30, 11, 15)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 11, 15): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 2, 11, 15, 1): (datetime(2014, 7, 3, 11, 15), datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 11, 15), datetime(2014, 7, 3, 11, 15)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15))}))
tests.append(([BusinessHour(-1), BusinessHour(n=-2), BusinessHour(n=-4),
BusinessHour(n=-1, end='10:00'), BusinessHour(n=-2, end='4:00'),
BusinessHour(n=-4, end='15:00')],
{datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9), datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 9), datetime(2014, 7, 3, 9)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9): (datetime(2014, 7, 7, 9), datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 9), datetime(2014, 7, 8, 9))}))
tests.append(([BusinessHour(start='17:00', end='05:00'),
BusinessHour(n=3, start='17:00', end='03:00')],
{datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 17), datetime(2014, 6, 30, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)),
datetime(2014, 7, 4, 17): (datetime(2014, 7, 4, 17), datetime(2014, 7, 4, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 17), datetime(2014, 7, 3, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 17, 1): (datetime(2014, 7, 8, 17), datetime(2014, 7, 7, 17)),}))
tests.append(([BusinessHour(-1, start='17:00', end='05:00'),
BusinessHour(n=-2, start='17:00', end='03:00')],
{datetime(2014, 7, 1, 11): (datetime(2014, 6, 30, 17), datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 16, 59): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 3, 17), datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 18): (datetime(2014, 7, 7, 17), datetime(2014, 7, 8, 17))}))
for offsets, cases in tests:
for offset in offsets:
for dt, (exp_next, exp_prev) in compat.iteritems(cases):
self.assertEqual(offset._next_opening_time(dt), exp_next)
self.assertEqual(offset._prev_opening_time(dt), exp_prev)
def test_apply(self):
tests = []
tests.append((BusinessHour(),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
tests.append((BusinessHour(4),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
tests.append((BusinessHour(-1),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30)}))
tests.append((BusinessHour(-4),
{datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30)}))
tests.append((BusinessHour(start='13:00', end='16:00'),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14)}))
tests.append((BusinessHour(n=2, start='13:00', end='16:00'),
{datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30)}))
tests.append((BusinessHour(n=-1, start='13:00', end='16:00'),
{datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15)}))
tests.append((BusinessHour(n=-3, start='10:00', end='16:00'),
{datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30)}))
tests.append((BusinessHour(start='19:00', end='05:00'),
{datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30)}))
tests.append((BusinessHour(n=-1, start='19:00', end='05:00'),
{datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
tests = []
tests.append((BusinessHour(40), # A week later
{datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30)}))
tests.append((BusinessHour(-25), # 3 days and 1 hour before
{datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30)}))
tests.append((BusinessHour(28, start='21:00', end='02:00'), # 5 days and 3 hours later
{datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_nanoseconds(self):
tests = []
tests.append((BusinessHour(),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp('2014-07-04 16:00') + Nano(5),
Timestamp('2014-07-04 16:00') + Nano(5): Timestamp('2014-07-07 09:00') + Nano(5),
Timestamp('2014-07-04 16:00') - Nano(5): Timestamp('2014-07-04 17:00') - Nano(5)
}))
tests.append((BusinessHour(-1),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp('2014-07-04 14:00') + Nano(5),
Timestamp('2014-07-04 10:00') + Nano(5): Timestamp('2014-07-04 09:00') + Nano(5),
Timestamp('2014-07-04 10:00') - Nano(5): Timestamp('2014-07-03 17:00') - Nano(5),
}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = self._offset()
offset2 = self._offset()
self.assertFalse(offset1 != offset2)
def test_datetimeindex(self):
idx1 = DatetimeIndex(start='2014-07-04 15:00', end='2014-07-08 10:00', freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:00', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:00', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00', '2014-07-07 12:00',
'2014-07-07 13:00', '2014-07-07 14:00', '2014-07-07 15:00',
'2014-07-07 16:00', '2014-07-08 09:00', '2014-07-08 10:00'],
freq='BH')
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
idx1 = DatetimeIndex(start='2014-07-04 15:45', end='2014-07-08 10:45', freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:45', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:45', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:45', '2014-07-04 16:45', '2014-07-07 09:45',
'2014-07-07 10:45', '2014-07-07 11:45', '2014-07-07 12:45',
'2014-07-07 13:45', '2014-07-07 14:45', '2014-07-07 15:45',
'2014-07-07 16:45', '2014-07-08 09:45', '2014-07-08 10:45'],
freq='BH')
expected = idx1 # NB: overrides the explicit index built above, so idx2/idx3 are effectively compared against idx1
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
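# CustomBusinessDay (aliased CDay) behaves like BusinessDay but allows the
# working week to be customised through ``weekmask``, ``holidays`` and
# ``calendar`` (backed by numpy.busdaycalendar). With the defaults,
# datetime(2008, 1, 4) + CDay() skips the weekend and lands on Monday
# 2008-01-07, as the apply tests below check.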
class TestCustomBusinessDay(Base):
_multiprocess_can_split_ = True
_offset = CDay
def setUp(self):
self.d = datetime(2008, 1, 1)
self.nd = np.datetime64('2008-01-01 00:00:00Z')
tm._skip_if_no_cday()
self.offset = CDay()
self.offset2 = CDay(2)
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CDay()
offset2 = CDay()
offset2.normalize = True
self.assertEqual(offset, offset2)
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessDay>'
assert repr(self.offset2) == '<2 * CustomBusinessDays>'
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
self.assertEqual(self.offset2(self.nd), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + CDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10 * self.offset, self.d + CDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5 * CDay(-10)),
self.d + CDay(50))
def testRollback1(self):
self.assertEqual(CDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(
CDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(CDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(
CDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = CDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 14))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 17))
offset = offsets.Day()
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 15))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 15))
def test_onOffset(self):
tests = [(CDay(), datetime(2008, 1, 1), True),
(CDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
from pandas.core.datetools import cday
tests = []
tests.append((cday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2 * cday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-cday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2 * cday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((CDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CDay(10)
self.assertEqual(result, datetime(2012, 11, 6))
result = dt + CDay(100) - CDay(100)
self.assertEqual(result, dt)
off = CDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
self.assertEqual(rs, xp)
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
self.assertEqual(rs, xp)
def test_apply_corner(self):
self.assertRaises(Exception, CDay().apply, BMonthEnd())
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = CDay()
offset2 = CDay()
self.assertFalse(offset1 != offset2)
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
tday = CDay(holidays=holidays)
for year in range(2012, 2015):
dt = datetime(year, 4, 30)
xp = datetime(year, 5, 2)
rs = dt + tday
self.assertEqual(rs, xp)
def test_weekmask(self):
weekmask_saudi = 'Sat Sun Mon Tue Wed' # Thu-Fri Weekend
weekmask_uae = '1111001' # Fri-Sat Weekend
weekmask_egypt = [1,1,1,1,0,0,1] # Fri-Sat Weekend
bday_saudi = CDay(weekmask=weekmask_saudi)
bday_uae = CDay(weekmask=weekmask_uae)
bday_egypt = CDay(weekmask=weekmask_egypt)
dt = datetime(2013, 5, 1)
xp_saudi = datetime(2013, 5, 4)
xp_uae = datetime(2013, 5, 2)
xp_egypt = datetime(2013, 5, 2)
self.assertEqual(xp_saudi, dt + bday_saudi)
self.assertEqual(xp_uae, dt + bday_uae)
self.assertEqual(xp_egypt, dt + bday_egypt)
xp2 = datetime(2013, 5, 5)
self.assertEqual(xp2, dt + 2 * bday_saudi)
self.assertEqual(xp2, dt + 2 * bday_uae)
self.assertEqual(xp2, dt + 2 * bday_egypt)
def test_weekmask_and_holidays(self):
weekmask_egypt = 'Sun Mon Tue Wed Thu' # Fri-Sat Weekend
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime(2013, 4, 30)
xp_egypt = datetime(2013, 5, 5)
self.assertEqual(xp_egypt, dt + 2 * bday_egypt)
def test_calendar(self):
calendar = USFederalHolidayCalendar()
dt = datetime(2014, 1, 17)
assertEq(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = self.round_trip_pickle(obj)
self.assertEqual(unpickled, obj)
_check_roundtrip(self.offset)
_check_roundtrip(self.offset2)
_check_roundtrip(self.offset*2)
def test_pickle_compat_0_14_1(self):
hdays = [datetime(2013, 1, 1) for _ in range(4)]
pth = tm.get_data_path()
cday0_14_1 = read_pickle(os.path.join(pth, 'cday-0.14.1.pickle'))
cday = CDay(holidays=hdays)
self.assertEqual(cday, cday0_14_1)
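# CBMonthEnd / CBMonthBegin roll to the last / first *custom* business day of a
# month, honouring the same weekmask/holidays/calendar options as CDay.
# The assertions shared by both offsets live in CustomBusinessMonthBase.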
class CustomBusinessMonthBase(object):
_multiprocess_can_split_ = True
def setUp(self):
self.d = datetime(2008, 1, 1)
tm._skip_if_no_cday()
self.offset = self._object()
self.offset2 = self._object(2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2,
self.d + self._object(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10 * self.offset,
self.d + self._object(10))
def testMult2(self):
self.assertEqual(self.d + (-5 * self._object(-10)),
self.d + self._object(50))
def test_offsets_compare_equal(self):
offset1 = self._object()
offset2 = self._object()
self.assertFalse(offset1 != offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = self.round_trip_pickle(obj)
self.assertEqual(unpickled, obj)
_check_roundtrip(self._object())
_check_roundtrip(self._object(2))
_check_roundtrip(self._object()*2)
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_object = CBMonthEnd
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CBMonthEnd()
offset2 = CBMonthEnd()
offset2.normalize = True
self.assertEqual(offset, offset2)
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthEnd>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthEnds>'
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 2, 29))
def testRollback1(self):
self.assertEqual(
CDay(10).rollback(datetime(2007, 12, 31)), datetime(2007, 12, 31))
def testRollback2(self):
self.assertEqual(CBMonthEnd(10).rollback(self.d),
datetime(2007,12,31))
def testRollforward1(self):
self.assertEqual(CBMonthEnd(10).rollforward(self.d), datetime(2008,1,31))
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 8, 31))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 28))
offset = offsets.Day()
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 15))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 15))
def test_onOffset(self):
tests = [(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
cbm = CBMonthEnd()
tests = []
tests.append((cbm,
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
tests.append((2 * cbm,
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31)}))
tests.append((-cbm,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31)}))
tests.append((-2 * cbm,
{datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31)}))
tests.append((CBMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
self.assertEqual(result, datetime(2013, 7, 31))
result = dt + CDay(100) - CDay(100)
self.assertEqual(result, dt)
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
self.assertEqual(rs, xp)
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
self.assertEqual(rs, xp)
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-01-31', datetime(2012, 2, 28),
np.datetime64('2012-02-29')]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012,1,1)
self.assertEqual(dt + bm_offset,datetime(2012,1,30))
self.assertEqual(dt + 2*bm_offset,datetime(2012,2,27))
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
self.assertEqual(DatetimeIndex(start='20120101',end='20130101',
freq=freq).tolist()[0],
datetime(2012,1,31))
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_object = CBMonthBegin
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CBMonthBegin()
offset2 = CBMonthBegin()
offset2.normalize = True
self.assertEqual(offset, offset2)
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthBegin>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthBegins>'
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 3, 3))
def testRollback1(self):
self.assertEqual(
CDay(10).rollback(datetime(2007, 12, 31)), datetime(2007, 12, 31))
def testRollback2(self):
self.assertEqual(CBMonthBegin(10).rollback(self.d),
datetime(2008,1,1))
def testRollforward1(self):
self.assertEqual(CBMonthBegin(10).rollforward(self.d), datetime(2008,1,1))
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 3))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 10, 1))
offset = offsets.Day()
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 15))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 15))
def test_onOffset(self):
tests = [(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
cbm = CBMonthBegin()
tests = []
tests.append((cbm,
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3)}))
tests.append((2 * cbm,
{datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1)}))
tests.append((-cbm,
{datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1)}))
tests.append((-2 * cbm,
{datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1)}))
tests.append((CBMonthBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
self.assertEqual(result, datetime(2013, 8, 1))
result = dt + CDay(100) - CDay(100)
self.assertEqual(result, dt)
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
self.assertEqual(rs, xp)
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
self.assertEqual(rs, xp)
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-02-01', datetime(2012, 2, 2),
np.datetime64('2012-03-01')]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012,1,1)
self.assertEqual(dt + bm_offset,datetime(2012,1,2))
self.assertEqual(dt + 2*bm_offset,datetime(2012,2,3))
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
self.assertEqual(DatetimeIndex(start='20120101', end='20130101',
freq=cbmb).tolist()[0],
datetime(2012,1,3))
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected, ("\nExpected: %s\nActual: %s\nFor Offset: %s"
"\nAt Date: %s" %
(expected, actual, offset, date))
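# Week(weekday=k) moves to the next occurrence of weekday k (0=Monday); with
# n=0 it only rolls forward when the date is not already on that weekday, and
# Week() without a weekday simply adds n whole weeks.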
class TestWeek(Base):
_offset = Week
def test_repr(self):
self.assertEqual(repr(Week(weekday=0)), "<Week: weekday=0>")
self.assertEqual(repr(Week(n=-1, weekday=0)), "<-1 * Week: weekday=0>")
self.assertEqual(repr(Week(n=-2, weekday=0)), "<-2 * Weeks: weekday=0>")
def test_corner(self):
self.assertRaises(ValueError, Week, weekday=7)
assertRaisesRegexp(ValueError, "Day must be", Week, weekday=-1)
def test_isAnchored(self):
self.assertTrue(Week(weekday=0).isAnchored())
self.assertFalse(Week().isAnchored())
self.assertFalse(Week(2, weekday=2).isAnchored())
self.assertFalse(Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
tests.append((Week(-2, weekday=1), # n=-2 -> back two weeks, anchored on Tuesday
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
for weekday in range(7):
offset = Week(weekday=weekday)
for day in range(1, 8):
date = datetime(2008, 1, day)
if day % 7 == weekday:
expected = True
else:
expected = False
assertOnOffset(offset, date, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = Week()
offset2 = Week()
self.assertFalse(offset1 != offset2)
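# WeekOfMonth(week=w, weekday=d) anchors on the (w+1)-th occurrence of weekday d
# in each month, e.g. week=0, weekday=0 is the first Monday and week=2,
# weekday=1 is the third Tuesday (see the test cases below).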
class TestWeekOfMonth(Base):
_offset = WeekOfMonth
def test_constructor(self):
assertRaisesRegexp(ValueError, "^N cannot be 0", WeekOfMonth, n=0, week=1, weekday=1)
assertRaisesRegexp(ValueError, "^Week", WeekOfMonth, n=1, week=4, weekday=0)
assertRaisesRegexp(ValueError, "^Week", WeekOfMonth, n=1, week=-1, weekday=0)
assertRaisesRegexp(ValueError, "^Day", WeekOfMonth, n=1, week=0, weekday=-1)
assertRaisesRegexp(ValueError, "^Day", WeekOfMonth, n=1, week=0, weekday=7)
def test_repr(self):
self.assertEqual(repr(WeekOfMonth(weekday=1,week=2)), "<WeekOfMonth: week=2, weekday=1>")
def test_offset(self):
date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month
date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month
date4 = datetime(2011, 1, 25) # 4th Tuesday of Month
# see for loop for structure
test_cases = [
(-2, 2, 1, date1, datetime(2010, 11, 16)),
(-2, 2, 1, date2, datetime(2010, 11, 16)),
(-2, 2, 1, date3, datetime(2010, 11, 16)),
(-2, 2, 1, date4, datetime(2010, 12, 21)),
(-1, 2, 1, date1, datetime(2010, 12, 21)),
(-1, 2, 1, date2, datetime(2010, 12, 21)),
(-1, 2, 1, date3, datetime(2010, 12, 21)),
(-1, 2, 1, date4, datetime(2011, 1, 18)),
(1, 0, 0, date1, datetime(2011, 2, 7)),
(1, 0, 0, date2, datetime(2011, 2, 7)),
(1, 0, 0, date3, datetime(2011, 2, 7)),
(1, 0, 0, date4, datetime(2011, 2, 7)),
(1, 0, 1, date1, datetime(2011, 2, 1)),
(1, 0, 1, date2, datetime(2011, 2, 1)),
(1, 0, 1, date3, datetime(2011, 2, 1)),
(1, 0, 1, date4, datetime(2011, 2, 1)),
(1, 0, 2, date1, datetime(2011, 1, 5)),
(1, 0, 2, date2, datetime(2011, 2, 2)),
(1, 0, 2, date3, datetime(2011, 2, 2)),
(1, 0, 2, date4, datetime(2011, 2, 2)),
(1, 2, 1, date1, datetime(2011, 1, 18)),
(1, 2, 1, date2, datetime(2011, 1, 18)),
(1, 2, 1, date3, datetime(2011, 2, 15)),
(1, 2, 1, date4, datetime(2011, 2, 15)),
(2, 2, 1, date1, datetime(2011, 2, 15)),
(2, 2, 1, date2, datetime(2011, 2, 15)),
(2, 2, 1, date3, datetime(2011, 3, 15)),
(2, 2, 1, date4, datetime(2011, 3, 15)),
]
for n, week, weekday, date, expected in test_cases:
offset = WeekOfMonth(n, week=week, weekday=weekday)
assertEq(offset, date, expected)
# try subtracting
result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
self.assertEqual(result, datetime(2011, 1, 12))
result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
self.assertEqual(result, datetime(2011, 2, 2))
def test_onOffset(self):
test_cases = [
(0, 0, datetime(2011, 2, 7), True),
(0, 0, datetime(2011, 2, 6), False),
(0, 0, datetime(2011, 2, 14), False),
(1, 0, datetime(2011, 2, 14), True),
(0, 1, datetime(2011, 2, 1), True),
(0, 1, datetime(2011, 2, 8), False),
]
for week, weekday, date, expected in test_cases:
offset = WeekOfMonth(week=week, weekday=weekday)
self.assertEqual(offset.onOffset(date), expected)
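# LastWeekOfMonth(weekday=d) anchors on the final occurrence of weekday d in
# each month, e.g. the last Saturday of August 2013 is 2013-08-31.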
class TestLastWeekOfMonth(Base):
_offset = LastWeekOfMonth
def test_constructor(self):
assertRaisesRegexp(ValueError, "^N cannot be 0", \
LastWeekOfMonth, n=0, weekday=1)
assertRaisesRegexp(ValueError, "^Day", LastWeekOfMonth, n=1, weekday=-1)
assertRaisesRegexp(ValueError, "^Day", LastWeekOfMonth, n=1, weekday=7)
def test_offset(self):
#### Saturday
last_sat = datetime(2013,8,31)
next_sat = datetime(2013,9,28)
offset_sat = LastWeekOfMonth(n=1, weekday=5)
one_day_before = (last_sat + timedelta(days=-1))
self.assertEqual(one_day_before + offset_sat, last_sat)
one_day_after = (last_sat + timedelta(days=+1))
self.assertEqual(one_day_after + offset_sat, next_sat)
# Test on that day
self.assertEqual(last_sat + offset_sat, next_sat)
#### Thursday
offset_thur = LastWeekOfMonth(n=1, weekday=3)
last_thurs = datetime(2013,1,31)
next_thurs = datetime(2013,2,28)
one_day_before = last_thurs + timedelta(days=-1)
self.assertEqual(one_day_before + offset_thur, last_thurs)
one_day_after = last_thurs + timedelta(days=+1)
self.assertEqual(one_day_after + offset_thur, next_thurs)
# Test on that day
self.assertEqual(last_thurs + offset_thur, next_thurs)
three_before = last_thurs + timedelta(days=-3)
self.assertEqual(three_before + offset_thur, last_thurs)
two_after = last_thurs + timedelta(days=+2)
self.assertEqual(two_after + offset_thur, next_thurs)
offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
self.assertEqual(datetime(2013,7,31) + offset_sunday, datetime(2013,8,25))
def test_onOffset(self):
test_cases = [
(WeekDay.SUN, datetime(2013, 1, 27), True),
(WeekDay.SAT, datetime(2013, 3, 30), True),
(WeekDay.MON, datetime(2013, 2, 18), False), #Not the last Mon
(WeekDay.SUN, datetime(2013, 2, 25), False), #Not a SUN
(WeekDay.MON, datetime(2013, 2, 25), True),
(WeekDay.SAT, datetime(2013, 11, 30), True),
(WeekDay.SAT, datetime(2006, 8, 26), True),
(WeekDay.SAT, datetime(2007, 8, 25), True),
(WeekDay.SAT, datetime(2008, 8, 30), True),
(WeekDay.SAT, datetime(2009, 8, 29), True),
(WeekDay.SAT, datetime(2010, 8, 28), True),
(WeekDay.SAT, datetime(2011, 8, 27), True),
(WeekDay.SAT, datetime(2019, 8, 31), True),
]
for weekday, date, expected in test_cases:
offset = LastWeekOfMonth(weekday=weekday)
self.assertEqual(offset.onOffset(date), expected, msg=date)
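# BMonthBegin / BMonthEnd anchor on the first / last business day (Mon-Fri) of
# the calendar month; the MonthBegin / MonthEnd classes further below use plain
# calendar days instead.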
class TestBMonthBegin(Base):
_offset = BMonthBegin
def test_offset(self):
tests = []
tests.append((BMonthBegin(),
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 1): datetime(2006, 10, 2),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1)}))
tests.append((BMonthBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 10, 2): datetime(2006, 10, 2),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 15): datetime(2006, 10, 2)}))
tests.append((BMonthBegin(2),
{datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 1, 15): datetime(2008, 3, 3),
datetime(2006, 12, 29): datetime(2007, 2, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1)}))
tests.append((BMonthBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 2),
datetime(2008, 6, 1): datetime(2008, 5, 1),
datetime(2008, 3, 10): datetime(2008, 3, 3),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 30): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 1)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [(BMonthBegin(), datetime(2007, 12, 31), False),
(BMonthBegin(), datetime(2008, 1, 1), True),
(BMonthBegin(), datetime(2001, 4, 2), True),
(BMonthBegin(), datetime(2008, 3, 3), True)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthBegin()
offset2 = BMonthBegin()
self.assertFalse(offset1 != offset2)
class TestBMonthEnd(Base):
_offset = BMonthEnd
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + BMonthEnd(normalize=True)
expected = dt.replace(hour=0) + BMonthEnd()
self.assertEqual(result, expected)
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthEnd()
offset2 = BMonthEnd()
self.assertFalse(offset1 != offset2)
class TestMonthBegin(Base):
_offset = MonthBegin
def test_offset(self):
tests = []
# NOTE: I'm not entirely happy with the logic here for Begin -ss
# see thread 'offset conventions' on the ML
tests.append((MonthBegin(),
{datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 2, 1): datetime(2008, 3, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2007, 1, 31): datetime(2007, 2, 1)}))
tests.append((MonthBegin(0),
{datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 12, 3): datetime(2007, 1, 1),
datetime(2007, 1, 31): datetime(2007, 2, 1)}))
tests.append((MonthBegin(2),
{datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 1, 31): datetime(2008, 3, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 12, 28): datetime(2008, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1)}))
tests.append((MonthBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 5, 31): datetime(2008, 5, 1),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 1, 2): datetime(2006, 1, 1)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
class TestMonthEnd(Base):
_offset = MonthEnd
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# def test_day_of_month(self):
# dt = datetime(2007, 1, 1)
# offset = MonthEnd(day=20)
# result = dt + offset
# self.assertEqual(result, datetime(2007, 1, 20))
# result = result + offset
# self.assertEqual(result, datetime(2007, 2, 20))
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + MonthEnd(normalize=True)
expected = dt.replace(hour=0) + MonthEnd()
self.assertEqual(result, expected)
def test_onOffset(self):
tests = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
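# BQuarterBegin / BQuarterEnd anchor on the first / last business day of each
# quarter; ``startingMonth`` picks the quarter cycle (startingMonth=1 ->
# Jan/Apr/Jul/Oct, the default startingMonth=3 -> Mar/Jun/Sep/Dec).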
class TestBQuarterBegin(Base):
_offset = BQuarterBegin
def test_repr(self):
self.assertEqual(repr(BQuarterBegin()),"<BusinessQuarterBegin: startingMonth=3>")
self.assertEqual(repr(BQuarterBegin(startingMonth=3)), "<BusinessQuarterBegin: startingMonth=3>")
self.assertEqual(repr(BQuarterBegin(startingMonth=1)), "<BusinessQuarterBegin: startingMonth=1>")
def test_isAnchored(self):
self.assertTrue(BQuarterBegin(startingMonth=1).isAnchored())
self.assertTrue(BQuarterBegin().isAnchored())
self.assertFalse(BQuarterBegin(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((BQuarterBegin(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 1, 31): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2007, 3, 15): datetime(2007, 4, 2),
datetime(2007, 2, 28): datetime(2007, 4, 2),
datetime(2007, 1, 1): datetime(2007, 4, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 7, 2),
datetime(2008, 4, 30): datetime(2008, 7, 1), }))
tests.append((BQuarterBegin(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 8, 15): datetime(2008, 11, 3),
datetime(2008, 9, 15): datetime(2008, 11, 3),
datetime(2008, 11, 1): datetime(2008, 11, 3),
datetime(2008, 4, 30): datetime(2008, 5, 1), }))
tests.append((BQuarterBegin(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2007, 12, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 1, 15): datetime(2008, 4, 1),
datetime(2008, 2, 27): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 4, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 2): datetime(2007, 7, 2), }))
tests.append((BQuarterBegin(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2007, 7, 3): datetime(2007, 7, 2),
datetime(2007, 4, 3): datetime(2007, 4, 2),
datetime(2007, 7, 2): datetime(2007, 4, 2),
datetime(2008, 4, 1): datetime(2008, 1, 1), }))
tests.append((BQuarterBegin(startingMonth=1, n=2),
{datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 1, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2007, 3, 31): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 10, 1),
datetime(2008, 4, 30): datetime(2008, 10, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = BQuarterBegin(n=-1, startingMonth=1)
self.assertEqual(datetime(2007, 4, 3) + offset, datetime(2007, 4, 2))
class TestBQuarterEnd(Base):
_offset = BQuarterEnd
def test_repr(self):
self.assertEqual(repr(BQuarterEnd()),"<BusinessQuarterEnd: startingMonth=3>")
self.assertEqual(repr(BQuarterEnd(startingMonth=3)), "<BusinessQuarterEnd: startingMonth=3>")
self.assertEqual(repr(BQuarterEnd(startingMonth=1)), "<BusinessQuarterEnd: startingMonth=1>")
def test_isAnchored(self):
self.assertTrue(BQuarterEnd(startingMonth=1).isAnchored())
self.assertTrue(BQuarterEnd().isAnchored())
self.assertFalse(BQuarterEnd(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((BQuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31), }))
tests.append((BQuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30), }))
tests.append((BQuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30), }))
tests.append((BQuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31), }))
tests.append((BQuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 1, 31) + offset, datetime(2010, 1, 29))
def test_onOffset(self):
tests = [
(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
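# FY5253 models a 52-53 week ("4-4-5") fiscal year that always ends on a given
# weekday: variation="last" ends on the last such weekday of ``startingMonth``,
# while variation="nearest" ends on the weekday nearest to that month's end.
# FY5253Quarter splits such a year into quarters, with ``qtr_with_extra_week``
# naming the quarter that absorbs the extra week in 53-week years. The helpers
# below simply fix the ``variation`` argument for the tests that follow.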
def makeFY5253LastOfMonthQuarter(*args, **kwds):
return FY5253Quarter(*args, variation="last", **kwds)
def makeFY5253NearestEndMonthQuarter(*args, **kwds):
return FY5253Quarter(*args, variation="nearest", **kwds)
def makeFY5253NearestEndMonth(*args, **kwds):
return FY5253(*args, variation="nearest", **kwds)
def makeFY5253LastOfMonth(*args, **kwds):
return FY5253(*args, variation="last", **kwds)
class TestFY5253LastOfMonth(Base):
def test_onOffset(self):
offset_lom_sat_aug = makeFY5253LastOfMonth(1, startingMonth=8, weekday=WeekDay.SAT)
offset_lom_sat_sep = makeFY5253LastOfMonth(1, startingMonth=9, weekday=WeekDay.SAT)
tests = [
#From Wikipedia (see: http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end)
(offset_lom_sat_aug, datetime(2006, 8, 26), True),
(offset_lom_sat_aug, datetime(2007, 8, 25), True),
(offset_lom_sat_aug, datetime(2008, 8, 30), True),
(offset_lom_sat_aug, datetime(2009, 8, 29), True),
(offset_lom_sat_aug, datetime(2010, 8, 28), True),
(offset_lom_sat_aug, datetime(2011, 8, 27), True),
(offset_lom_sat_aug, datetime(2012, 8, 25), True),
(offset_lom_sat_aug, datetime(2013, 8, 31), True),
(offset_lom_sat_aug, datetime(2014, 8, 30), True),
(offset_lom_sat_aug, datetime(2015, 8, 29), True),
(offset_lom_sat_aug, datetime(2016, 8, 27), True),
(offset_lom_sat_aug, datetime(2017, 8, 26), True),
(offset_lom_sat_aug, datetime(2018, 8, 25), True),
(offset_lom_sat_aug, datetime(2019, 8, 31), True),
(offset_lom_sat_aug, datetime(2006, 8, 27), False),
(offset_lom_sat_aug, datetime(2007, 8, 28), False),
(offset_lom_sat_aug, datetime(2008, 8, 31), False),
(offset_lom_sat_aug, datetime(2009, 8, 30), False),
(offset_lom_sat_aug, datetime(2010, 8, 29), False),
(offset_lom_sat_aug, datetime(2011, 8, 28), False),
(offset_lom_sat_aug, datetime(2006, 8, 25), False),
(offset_lom_sat_aug, datetime(2007, 8, 24), False),
(offset_lom_sat_aug, datetime(2008, 8, 29), False),
(offset_lom_sat_aug, datetime(2009, 8, 28), False),
(offset_lom_sat_aug, datetime(2010, 8, 27), False),
(offset_lom_sat_aug, datetime(2011, 8, 26), False),
(offset_lom_sat_aug, datetime(2019, 8, 30), False),
#From GMCR (see for example: http://yahoo.brand.edgar-online.com/Default.aspx?companyid=3184&formtypeID=7)
(offset_lom_sat_sep, datetime(2010, 9, 25), True),
(offset_lom_sat_sep, datetime(2011, 9, 24), True),
(offset_lom_sat_sep, datetime(2012, 9, 29), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
offset_lom_aug_sat = makeFY5253LastOfMonth(startingMonth=8, weekday=WeekDay.SAT)
offset_lom_aug_sat_1 = makeFY5253LastOfMonth(n=1, startingMonth=8, weekday=WeekDay.SAT)
date_seq_lom_aug_sat = [datetime(2006, 8, 26), datetime(2007, 8, 25),
datetime(2008, 8, 30), datetime(2009, 8, 29),
datetime(2010, 8, 28), datetime(2011, 8, 27),
datetime(2012, 8, 25), datetime(2013, 8, 31),
datetime(2014, 8, 30), datetime(2015, 8, 29),
datetime(2016, 8, 27)]
tests = [
(offset_lom_aug_sat, date_seq_lom_aug_sat),
(offset_lom_aug_sat_1, date_seq_lom_aug_sat),
(offset_lom_aug_sat, [datetime(2006, 8, 25)] + date_seq_lom_aug_sat),
(offset_lom_aug_sat_1, [datetime(2006, 8, 27)] + date_seq_lom_aug_sat[1:]),
(makeFY5253LastOfMonth(n=-1, startingMonth=8, weekday=WeekDay.SAT), list(reversed(date_seq_lom_aug_sat))),
]
for test in tests:
offset, data = test
current = data[0]
for datum in data[1:]:
current = current + offset
self.assertEqual(current, datum)
class TestFY5253NearestEndMonth(Base):
def test_get_target_month_end(self):
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT).get_target_month_end(datetime(2013,1,1)), datetime(2013,8,31))
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=12, weekday=WeekDay.SAT).get_target_month_end(datetime(2013,1,1)), datetime(2013,12,31))
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=2, weekday=WeekDay.SAT).get_target_month_end(datetime(2013,1,1)), datetime(2013,2,28))
def test_get_year_end(self):
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT).get_year_end(datetime(2013,1,1)), datetime(2013,8,31))
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SUN).get_year_end(datetime(2013,1,1)), datetime(2013,9,1))
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.FRI).get_year_end(datetime(2013,1,1)), datetime(2013,8,30))
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest")
self.assertEqual(offset_n.get_year_end(datetime(2012,1,1)), datetime(2013,1,1))
self.assertEqual(offset_n.get_year_end(datetime(2012,1,10)), datetime(2013,1,1))
self.assertEqual(offset_n.get_year_end(datetime(2013,1,1)), datetime(2013,12,31))
self.assertEqual(offset_n.get_year_end(datetime(2013,1,2)), datetime(2013,12,31))
self.assertEqual(offset_n.get_year_end(datetime(2013,1,3)), datetime(2013,12,31))
self.assertEqual(offset_n.get_year_end(datetime(2013,1,10)), datetime(2013,12,31))
JNJ = FY5253(n=1, startingMonth=12, weekday=6, variation="nearest")
self.assertEqual(JNJ.get_year_end(datetime(2006, 1, 1)), datetime(2006, 12, 31))
def test_onOffset(self):
offset_lom_aug_sat = makeFY5253NearestEndMonth(1, startingMonth=8, weekday=WeekDay.SAT)
offset_lom_aug_thu = makeFY5253NearestEndMonth(1, startingMonth=8, weekday=WeekDay.THU)
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest")
tests = [
# From Wikipedia (see: http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Saturday_nearest_the_end_of_month)
# 2006-09-02 2006 September 2
# 2007-09-01 2007 September 1
# 2008-08-30 2008 August 30 (leap year)
# 2009-08-29 2009 August 29
# 2010-08-28 2010 August 28
# 2011-09-03 2011 September 3
# 2012-09-01 2012 September 1 (leap year)
# 2013-08-31 2013 August 31
# 2014-08-30 2014 August 30
# 2015-08-29 2015 August 29
# 2016-09-03 2016 September 3 (leap year)
# 2017-09-02 2017 September 2
# 2018-09-01 2018 September 1
# 2019-08-31 2019 August 31
(offset_lom_aug_sat, datetime(2006, 9, 2), True),
(offset_lom_aug_sat, datetime(2007, 9, 1), True),
(offset_lom_aug_sat, datetime(2008, 8, 30), True),
(offset_lom_aug_sat, datetime(2009, 8, 29), True),
(offset_lom_aug_sat, datetime(2010, 8, 28), True),
(offset_lom_aug_sat, datetime(2011, 9, 3), True),
(offset_lom_aug_sat, datetime(2016, 9, 3), True),
(offset_lom_aug_sat, datetime(2017, 9, 2), True),
(offset_lom_aug_sat, datetime(2018, 9, 1), True),
(offset_lom_aug_sat, datetime(2019, 8, 31), True),
(offset_lom_aug_sat, datetime(2006, 8, 27), False),
(offset_lom_aug_sat, datetime(2007, 8, 28), False),
(offset_lom_aug_sat, datetime(2008, 8, 31), False),
(offset_lom_aug_sat, datetime(2009, 8, 30), False),
(offset_lom_aug_sat, datetime(2010, 8, 29), False),
(offset_lom_aug_sat, datetime(2011, 8, 28), False),
(offset_lom_aug_sat, datetime(2006, 8, 25), False),
(offset_lom_aug_sat, datetime(2007, 8, 24), False),
(offset_lom_aug_sat, datetime(2008, 8, 29), False),
(offset_lom_aug_sat, datetime(2009, 8, 28), False),
(offset_lom_aug_sat, datetime(2010, 8, 27), False),
(offset_lom_aug_sat, datetime(2011, 8, 26), False),
(offset_lom_aug_sat, datetime(2019, 8, 30), False),
#From Micron, see: http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
(offset_lom_aug_thu, datetime(2012, 8, 30), True),
(offset_lom_aug_thu, datetime(2011, 9, 1), True),
(offset_n, datetime(2012, 12, 31), False),
(offset_n, datetime(2013, 1, 1), True),
(offset_n, datetime(2013, 1, 2), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
date_seq_nem_8_sat = [datetime(2006, 9, 2), datetime(2007, 9, 1),
datetime(2008, 8, 30), datetime(2009, 8, 29),
datetime(2010, 8, 28), datetime(2011, 9, 3)]
JNJ = [datetime(2005, 1, 2), datetime(2006, 1, 1),
datetime(2006, 12, 31), datetime(2007, 12, 30),
datetime(2008, 12, 28), datetime(2010, 1, 3),
datetime(2011, 1, 2), datetime(2012, 1, 1),
datetime(2012, 12, 30)]
DEC_SAT = FY5253(n=-1, startingMonth=12, weekday=5, variation="nearest")
tests = [
(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT), date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT), date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT), [datetime(2006, 9, 1)] + date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT), [datetime(2006, 9, 3)] + date_seq_nem_8_sat[1:]),
(makeFY5253NearestEndMonth(n=-1, startingMonth=8, weekday=WeekDay.SAT), list(reversed(date_seq_nem_8_sat))),
(makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN), JNJ),
(makeFY5253NearestEndMonth(n=-1, startingMonth=12, weekday=WeekDay.SUN), list(reversed(JNJ))),
(makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN), [datetime(2005,1,2), datetime(2006, 1, 1)]),
(makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN), [datetime(2006,1,2), datetime(2006, 12, 31)]),
(DEC_SAT, [datetime(2013,1,15), datetime(2012,12,29)])
]
for test in tests:
offset, data = test
current = data[0]
for datum in data[1:]:
current = current + offset
self.assertEqual(current, datum)
class TestFY5253LastOfMonthQuarter(Base):
def test_isAnchored(self):
self.assertTrue(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4).isAnchored())
self.assertTrue(makeFY5253LastOfMonthQuarter(weekday=WeekDay.SAT, startingMonth=3, qtr_with_extra_week=4).isAnchored())
self.assertFalse(makeFY5253LastOfMonthQuarter(2, startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4).isAnchored())
def test_equality(self):
self.assertEqual(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4), makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4))
self.assertNotEqual(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4), makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SUN, qtr_with_extra_week=4))
self.assertNotEqual(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4), makeFY5253LastOfMonthQuarter(startingMonth=2, weekday=WeekDay.SAT, qtr_with_extra_week=4))
def test_offset(self):
offset = makeFY5253LastOfMonthQuarter(1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
offset2 = makeFY5253LastOfMonthQuarter(2, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
offset4 = makeFY5253LastOfMonthQuarter(4, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
offset_neg1 = makeFY5253LastOfMonthQuarter(-1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
offset_neg2 = makeFY5253LastOfMonthQuarter(-2, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
GMCR = [datetime(2010, 3, 27),
datetime(2010, 6, 26),
datetime(2010, 9, 25),
datetime(2010, 12, 25),
datetime(2011, 3, 26),
datetime(2011, 6, 25),
datetime(2011, 9, 24),
datetime(2011, 12, 24),
datetime(2012, 3, 24),
datetime(2012, 6, 23),
datetime(2012, 9, 29),
datetime(2012, 12, 29),
datetime(2013, 3, 30),
datetime(2013, 6, 29)]
assertEq(offset, base=GMCR[0], expected=GMCR[1])
assertEq(offset, base=GMCR[0] + relativedelta(days=-1), expected=GMCR[0])
assertEq(offset, base=GMCR[1], expected=GMCR[2])
assertEq(offset2, base=GMCR[0], expected=GMCR[2])
assertEq(offset4, base=GMCR[0], expected=GMCR[4])
assertEq(offset_neg1, base=GMCR[-1], expected=GMCR[-2])
assertEq(offset_neg1, base=GMCR[-1] + relativedelta(days=+1), expected=GMCR[-1])
assertEq(offset_neg2, base=GMCR[-1], expected=GMCR[-3])
date = GMCR[0] + relativedelta(days=-1)
for expected in GMCR:
assertEq(offset, date, expected)
date = date + offset
date = GMCR[-1] + relativedelta(days=+1)
for expected in reversed(GMCR):
assertEq(offset_neg1, date, expected)
date = date + offset_neg1
def test_onOffset(self):
lomq_aug_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=8, weekday=WeekDay.SAT, qtr_with_extra_week=4)
lomq_sep_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
tests = [
#From Wikipedia
(lomq_aug_sat_4, datetime(2006, 8, 26), True),
(lomq_aug_sat_4, datetime(2007, 8, 25), True),
(lomq_aug_sat_4, datetime(2008, 8, 30), True),
(lomq_aug_sat_4, datetime(2009, 8, 29), True),
(lomq_aug_sat_4, datetime(2010, 8, 28), True),
(lomq_aug_sat_4, datetime(2011, 8, 27), True),
(lomq_aug_sat_4, datetime(2019, 8, 31), True),
(lomq_aug_sat_4, datetime(2006, 8, 27), False),
(lomq_aug_sat_4, datetime(2007, 8, 28), False),
(lomq_aug_sat_4, datetime(2008, 8, 31), False),
(lomq_aug_sat_4, datetime(2009, 8, 30), False),
(lomq_aug_sat_4, datetime(2010, 8, 29), False),
(lomq_aug_sat_4, datetime(2011, 8, 28), False),
(lomq_aug_sat_4, datetime(2006, 8, 25), False),
(lomq_aug_sat_4, datetime(2007, 8, 24), False),
(lomq_aug_sat_4, datetime(2008, 8, 29), False),
(lomq_aug_sat_4, datetime(2009, 8, 28), False),
(lomq_aug_sat_4, datetime(2010, 8, 27), False),
(lomq_aug_sat_4, datetime(2011, 8, 26), False),
(lomq_aug_sat_4, datetime(2019, 8, 30), False),
#From GMCR
(lomq_sep_sat_4, datetime(2010, 9, 25), True),
(lomq_sep_sat_4, datetime(2011, 9, 24), True),
(lomq_sep_sat_4, datetime(2012, 9, 29), True),
(lomq_sep_sat_4, datetime(2013, 6, 29), True),
(lomq_sep_sat_4, datetime(2012, 6, 23), True),
(lomq_sep_sat_4, datetime(2012, 6, 30), False),
(lomq_sep_sat_4, datetime(2013, 3, 30), True),
(lomq_sep_sat_4, datetime(2012, 3, 24), True),
(lomq_sep_sat_4, datetime(2012, 12, 29), True),
(lomq_sep_sat_4, datetime(2011, 12, 24), True),
#INTC (extra week in Q1)
#See: http://www.intc.com/releasedetail.cfm?ReleaseID=542844
(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2011, 4, 2), True),
#see: http://google.brand.edgar-online.com/?sym=INTC&formtypeID=7
(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2012, 12, 29), True),
(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2011, 12, 31), True),
(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2010, 12, 25), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_year_has_extra_week(self):
#End of long Q1
self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2011, 4, 2)))
#Start of long Q1
self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 26)))
#End of year before year with long Q1
self.assertFalse(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 25)))
for year in [x for x in range(1994, 2011+1) if x not in [2011, 2005, 2000, 1994]]:
self.assertFalse(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(year, 4, 2)))
#Other long years
self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2005, 4, 2)))
self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2000, 4, 2)))
self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(1994, 4, 2)))
def test_get_weeks(self):
sat_dec_1 = makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1)
sat_dec_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=4)
self.assertEqual(sat_dec_1.get_weeks(datetime(2011, 4, 2)), [14, 13, 13, 13])
self.assertEqual(sat_dec_4.get_weeks(datetime(2011, 4, 2)), [13, 13, 13, 14])
self.assertEqual(sat_dec_1.get_weeks(datetime(2010, 12, 25)), [13, 13, 13, 13])
class TestFY5253NearestEndMonthQuarter(Base):
def test_onOffset(self):
offset_nem_sat_aug_4 = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, weekday=WeekDay.SAT, qtr_with_extra_week=4)
offset_nem_thu_aug_4 = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, weekday=WeekDay.THU, qtr_with_extra_week=4)
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest", qtr_with_extra_week=4)
tests = [
#From Wikipedia
(offset_nem_sat_aug_4, datetime(2006, 9, 2), True),
(offset_nem_sat_aug_4, datetime(2007, 9, 1), True),
(offset_nem_sat_aug_4, datetime(2008, 8, 30), True),
(offset_nem_sat_aug_4, datetime(2009, 8, 29), True),
(offset_nem_sat_aug_4, datetime(2010, 8, 28), True),
(offset_nem_sat_aug_4, datetime(2011, 9, 3), True),
(offset_nem_sat_aug_4, datetime(2016, 9, 3), True),
(offset_nem_sat_aug_4, datetime(2017, 9, 2), True),
(offset_nem_sat_aug_4, datetime(2018, 9, 1), True),
(offset_nem_sat_aug_4, datetime(2019, 8, 31), True),
(offset_nem_sat_aug_4, datetime(2006, 8, 27), False),
(offset_nem_sat_aug_4, datetime(2007, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2008, 8, 31), False),
(offset_nem_sat_aug_4, datetime(2009, 8, 30), False),
(offset_nem_sat_aug_4, datetime(2010, 8, 29), False),
(offset_nem_sat_aug_4, datetime(2011, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2006, 8, 25), False),
(offset_nem_sat_aug_4, datetime(2007, 8, 24), False),
(offset_nem_sat_aug_4, datetime(2008, 8, 29), False),
(offset_nem_sat_aug_4, datetime(2009, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2010, 8, 27), False),
(offset_nem_sat_aug_4, datetime(2011, 8, 26), False),
(offset_nem_sat_aug_4, datetime(2019, 8, 30), False),
#From Micron, see: http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
(offset_nem_thu_aug_4, datetime(2012, 8, 30), True),
(offset_nem_thu_aug_4, datetime(2011, 9, 1), True),
#See: http://google.brand.edgar-online.com/?sym=MU&formtypeID=13
(offset_nem_thu_aug_4, datetime(2013, 5, 30), True),
(offset_nem_thu_aug_4, datetime(2013, 2, 28), True),
(offset_nem_thu_aug_4, datetime(2012, 11, 29), True),
(offset_nem_thu_aug_4, datetime(2012, 5, 31), True),
(offset_nem_thu_aug_4, datetime(2007, 3, 1), True),
(offset_nem_thu_aug_4, datetime(1994, 3, 3), True),
(offset_n, datetime(2012, 12, 31), False),
(offset_n, datetime(2013, 1, 1), True),
(offset_n, datetime(2013, 1, 2), False)
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_offset(self):
offset = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, weekday=WeekDay.THU, qtr_with_extra_week=4)
MU = [datetime(2012, 5, 31), datetime(2012, 8, 30), datetime(2012, 11, 29), datetime(2013, 2, 28), datetime(2013, 5, 30)]
date = MU[0] + relativedelta(days=-1)
for expected in MU:
assertEq(offset, date, expected)
date = date + offset
assertEq(offset, datetime(2012, 5, 31), datetime(2012, 8, 30))
assertEq(offset, datetime(2012, 5, 30), datetime(2012, 5, 31))
offset2 = FY5253Quarter(weekday=5, startingMonth=12,
variation="last", qtr_with_extra_week=4)
assertEq(offset2, datetime(2013,1,15), datetime(2013, 3, 30))
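# QuarterBegin / QuarterEnd are the calendar-day counterparts of the business
# quarter offsets above: they anchor on the first / last calendar day of each
# quarter defined by ``startingMonth``.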
class TestQuarterBegin(Base):
def test_repr(self):
self.assertEqual(repr(QuarterBegin()), "<QuarterBegin: startingMonth=3>")
self.assertEqual(repr(QuarterBegin(startingMonth=3)), "<QuarterBegin: startingMonth=3>")
self.assertEqual(repr(QuarterBegin(startingMonth=1)),"<QuarterBegin: startingMonth=1>")
def test_isAnchored(self):
self.assertTrue(QuarterBegin(startingMonth=1).isAnchored())
self.assertTrue(QuarterBegin().isAnchored())
self.assertFalse(QuarterBegin(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((QuarterBegin(startingMonth=1),
{datetime(2007, 12, 1): datetime(2008, 1, 1),
datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 1): datetime(2008, 7, 1), }))
tests.append((QuarterBegin(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 4, 30): datetime(2008, 5, 1), }))
tests.append((QuarterBegin(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 12, 1): datetime(2009, 1, 1),
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2008, 4, 30): datetime(2008, 4, 1), }))
tests.append((QuarterBegin(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2008, 4, 30): datetime(2008, 4, 1),
datetime(2008, 7, 1): datetime(2008, 4, 1)}))
tests.append((QuarterBegin(startingMonth=1, n=2),
{datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 2, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2008, 3, 31): datetime(2008, 7, 1),
datetime(2008, 4, 15): datetime(2008, 10, 1),
datetime(2008, 4, 1): datetime(2008, 10, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = QuarterBegin(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 2, 1) + offset, datetime(2010, 1, 1))
class TestQuarterEnd(Base):
_offset = QuarterEnd
def test_repr(self):
self.assertEqual(repr(QuarterEnd()), "<QuarterEnd: startingMonth=3>")
self.assertEqual(repr(QuarterEnd(startingMonth=3)), "<QuarterEnd: startingMonth=3>")
self.assertEqual(repr(QuarterEnd(startingMonth=1)), "<QuarterEnd: startingMonth=1>")
def test_isAnchored(self):
self.assertTrue(QuarterEnd(startingMonth=1).isAnchored())
self.assertTrue(QuarterEnd().isAnchored())
self.assertFalse(QuarterEnd(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((QuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31), }))
tests.append((QuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 5, 31),
datetime(2008, 3, 31): datetime(2008, 5, 31),
datetime(2008, 4, 15): datetime(2008, 5, 31),
datetime(2008, 4, 30): datetime(2008, 5, 31), }))
tests.append((QuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30), }))
tests.append((QuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),
datetime(2008, 7, 1): datetime(2008, 4, 30)}))
tests.append((QuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = QuarterEnd(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 2, 1) + offset, datetime(2010, 1, 31))
def test_onOffset(self):
tests = [(QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(QuarterEnd(
1, startingMonth=1), datetime(2007, 12, 31), False),
(QuarterEnd(
1, startingMonth=1), datetime(2008, 2, 29), False),
(QuarterEnd(
1, startingMonth=1), datetime(2007, 3, 30), False),
(QuarterEnd(
1, startingMonth=1), datetime(2007, 3, 31), False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(QuarterEnd(
1, startingMonth=1), datetime(2008, 5, 30), False),
(QuarterEnd(
1, startingMonth=1), datetime(2008, 5, 31), False),
(QuarterEnd(
1, startingMonth=1), datetime(2007, 6, 29), False),
(QuarterEnd(
1, startingMonth=1), datetime(2007, 6, 30), False),
(QuarterEnd(
1, startingMonth=2), datetime(2008, 1, 31), False),
(QuarterEnd(
1, startingMonth=2), datetime(2007, 12, 31), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(QuarterEnd(
1, startingMonth=2), datetime(2007, 3, 30), False),
(QuarterEnd(
1, startingMonth=2), datetime(2007, 3, 31), False),
(QuarterEnd(
1, startingMonth=2), datetime(2008, 4, 30), False),
(QuarterEnd(
1, startingMonth=2), datetime(2008, 5, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True),
(QuarterEnd(
1, startingMonth=2), datetime(2007, 6, 29), False),
(QuarterEnd(
1, startingMonth=2), datetime(2007, 6, 30), False),
(QuarterEnd(
1, startingMonth=3), datetime(2008, 1, 31), False),
(QuarterEnd(
1, startingMonth=3), datetime(2007, 12, 31), True),
(QuarterEnd(
1, startingMonth=3), datetime(2008, 2, 29), False),
(QuarterEnd(
1, startingMonth=3), datetime(2007, 3, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
(QuarterEnd(
1, startingMonth=3), datetime(2008, 4, 30), False),
(QuarterEnd(
1, startingMonth=3), datetime(2008, 5, 30), False),
(QuarterEnd(
1, startingMonth=3), datetime(2008, 5, 31), False),
(QuarterEnd(
1, startingMonth=3), datetime(2007, 6, 29), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearBegin(Base):
_offset = BYearBegin
def test_misspecified(self):
self.assertRaises(ValueError, BYearBegin, month=13)
self.assertRaises(ValueError, BYearEnd, month=13)
def test_offset(self):
tests = []
tests.append((BYearBegin(),
{datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2011, 1, 1): datetime(2011, 1, 3),
datetime(2011, 1, 3): datetime(2012, 1, 2),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2)
}
))
tests.append((BYearBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2), }))
tests.append((BYearBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 1, 2),
datetime(2009, 1, 4): datetime(2009, 1, 1),
datetime(2009, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 2),
datetime(2006, 12, 30): datetime(2006, 1, 2),
datetime(2006, 1, 1): datetime(2005, 1, 3), }))
tests.append((BYearBegin(-2),
{datetime(2007, 1, 1): datetime(2005, 1, 3),
datetime(2007, 6, 30): datetime(2006, 1, 2),
datetime(2008, 12, 31): datetime(2007, 1, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
class TestYearBegin(Base):
_offset = YearBegin
def test_misspecified(self):
self.assertRaises(ValueError, YearBegin, month=13)
def test_offset(self):
tests = []
tests.append((YearBegin(),
{datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1), }))
tests.append((YearBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1), }))
tests.append((YearBegin(3),
{datetime(2008, 1, 1): datetime(2011, 1, 1),
datetime(2008, 6, 30): datetime(2011, 1, 1),
datetime(2008, 12, 31): datetime(2011, 1, 1),
datetime(2005, 12, 30): datetime(2008, 1, 1),
datetime(2005, 12, 31): datetime(2008, 1, 1), }))
tests.append((YearBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 1, 1),
datetime(2007, 1, 15): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 1),
datetime(2006, 12, 30): datetime(2006, 1, 1),
datetime(2007, 1, 1): datetime(2006, 1, 1), }))
tests.append((YearBegin(-2),
{datetime(2007, 1, 1): datetime(2005, 1, 1),
datetime(2008, 6, 30): datetime(2007, 1, 1),
datetime(2008, 12, 31): datetime(2007, 1, 1), }))
tests.append((YearBegin(month=4),
{datetime(2007, 4, 1): datetime(2008, 4, 1),
datetime(2007, 4, 15): datetime(2008, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1), }))
tests.append((YearBegin(0, month=4),
{datetime(2007, 4, 1): datetime(2007, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1), }))
tests.append((YearBegin(4, month=4),
{datetime(2007, 4, 1): datetime(2011, 4, 1),
datetime(2007, 4, 15): datetime(2011, 4, 1),
datetime(2007, 3, 1): datetime(2010, 4, 1),
datetime(2007, 12, 15): datetime(2011, 4, 1),
datetime(2012, 1, 31): datetime(2015, 4, 1), }))
tests.append((YearBegin(-1, month=4),
{datetime(2007, 4, 1): datetime(2006, 4, 1),
datetime(2007, 3, 1): datetime(2006, 4, 1),
datetime(2007, 12, 15): datetime(2007, 4, 1),
datetime(2012, 1, 31): datetime(2011, 4, 1), }))
tests.append((YearBegin(-3, month=4),
{datetime(2007, 4, 1): datetime(2004, 4, 1),
datetime(2007, 3, 1): datetime(2004, 4, 1),
datetime(2007, 12, 15): datetime(2005, 4, 1),
datetime(2012, 1, 31): datetime(2009, 4, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(YearBegin(), datetime(2007, 1, 3), False),
(YearBegin(), datetime(2008, 1, 1), True),
(YearBegin(), datetime(2006, 12, 31), False),
(YearBegin(), datetime(2006, 1, 2), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearEndLagged(Base):
def test_bad_month_fail(self):
self.assertRaises(Exception, BYearEnd, month=13)
self.assertRaises(Exception, BYearEnd, month=0)
def test_offset(self):
tests = []
tests.append((BYearEnd(month=6),
{datetime(2008, 1, 1): datetime(2008, 6, 30),
datetime(2007, 6, 30): datetime(2008, 6, 30)},
))
tests.append((BYearEnd(n=-1, month=6),
{datetime(2008, 1, 1): datetime(2007, 6, 29),
datetime(2007, 6, 30): datetime(2007, 6, 29)},
))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
self.assertEqual(base + offset, expected)
def test_roll(self):
offset = BYearEnd(month=6)
date = datetime(2009, 11, 30)
self.assertEqual(offset.rollforward(date), datetime(2010, 6, 30))
self.assertEqual(offset.rollback(date), datetime(2009, 6, 30))
def test_onOffset(self):
tests = [
(BYearEnd(month=2), datetime(2007, 2, 28), True),
(BYearEnd(month=6), datetime(2007, 6, 30), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearEnd(Base):
_offset = BYearEnd
def test_offset(self):
tests = []
tests.append((BYearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2006, 12, 29),
datetime(2005, 12, 31): datetime(2006, 12, 29), }))
tests.append((BYearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 29), }))
tests.append((BYearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29), }))
tests.append((BYearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 30),
datetime(2008, 6, 30): datetime(2006, 12, 29),
datetime(2008, 12, 31): datetime(2006, 12, 29), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(BYearEnd(), datetime(2007, 12, 31), True),
(BYearEnd(), datetime(2008, 1, 1), False),
(BYearEnd(), datetime(2006, 12, 31), False),
(BYearEnd(), datetime(2006, 12, 29), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestYearEnd(Base):
_offset = YearEnd
def test_misspecified(self):
self.assertRaises(ValueError, YearEnd, month=13)
def test_offset(self):
tests = []
tests.append((YearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 31), }))
tests.append((YearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31), }))
tests.append((YearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 31),
datetime(2006, 12, 30): datetime(2005, 12, 31),
datetime(2007, 1, 1): datetime(2006, 12, 31), }))
tests.append((YearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 31),
datetime(2008, 6, 30): datetime(2006, 12, 31),
datetime(2008, 12, 31): datetime(2006, 12, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(YearEnd(), datetime(2007, 12, 31), True),
(YearEnd(), datetime(2008, 1, 1), False),
(YearEnd(), datetime(2006, 12, 31), True),
(YearEnd(), datetime(2006, 12, 29), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestYearEndDiffMonth(Base):
def test_offset(self):
tests = []
tests.append((YearEnd(month=3),
{datetime(2008, 1, 1): datetime(2008, 3, 31),
datetime(2008, 2, 15): datetime(2008, 3, 31),
datetime(2008, 3, 31): datetime(2009, 3, 31),
datetime(2008, 3, 30): datetime(2008, 3, 31),
datetime(2005, 3, 31): datetime(2006, 3, 31),
datetime(2006, 7, 30): datetime(2007, 3, 31)}))
tests.append((YearEnd(0, month=3),
{datetime(2008, 1, 1): datetime(2008, 3, 31),
datetime(2008, 2, 28): datetime(2008, 3, 31),
datetime(2008, 3, 31): datetime(2008, 3, 31),
datetime(2005, 3, 30): datetime(2005, 3, 31), }))
tests.append((YearEnd(-1, month=3),
{datetime(2007, 1, 1): datetime(2006, 3, 31),
datetime(2008, 2, 28): datetime(2007, 3, 31),
datetime(2008, 3, 31): datetime(2007, 3, 31),
datetime(2006, 3, 29): datetime(2005, 3, 31),
datetime(2006, 3, 30): datetime(2005, 3, 31),
datetime(2007, 3, 1): datetime(2006, 3, 31), }))
tests.append((YearEnd(-2, month=3),
{datetime(2007, 1, 1): datetime(2005, 3, 31),
datetime(2008, 6, 30): datetime(2007, 3, 31),
datetime(2008, 3, 31): datetime(2006, 3, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(YearEnd(month=3), datetime(2007, 3, 31), True),
(YearEnd(month=3), datetime(2008, 1, 1), False),
(YearEnd(month=3), datetime(2006, 3, 31), True),
(YearEnd(month=3), datetime(2006, 3, 29), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def assertEq(offset, base, expected):
actual = offset + base
actual_swapped = base + offset
actual_apply = offset.apply(base)
try:
assert actual == expected
assert actual_swapped == expected
assert actual_apply == expected
except AssertionError:
        raise AssertionError("\nExpected: %s\nActual: %s\nFor Offset: %s"
"\nAt Date: %s" %
(expected, actual, offset, base))
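# assertEq exercises all three addition paths -- offset + base, base + offset
# and offset.apply(base) -- so a failure in any of them reports the offending
# offset and base date together with the expected result.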
def test_Easter():
assertEq(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))
assertEq(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))
assertEq(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24))
assertEq(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24))
assertEq(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8))
assertEq(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4))
assertEq(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4))
assertEq(-Easter(2), datetime(2011, 1, 1), datetime(2009, 4, 12))
assertEq(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12))
assertEq(-Easter(2), datetime(2010, 4, 4), datetime(2008, 3, 23))
class TestTicks(tm.TestCase):
ticks = [Hour, Minute, Second, Milli, Micro, Nano]
def test_ticks(self):
offsets = [(Hour, Timedelta(hours=5)),
(Minute, Timedelta(hours=2, minutes=3)),
(Second, Timedelta(hours=2, seconds=3)),
(Milli, Timedelta(hours=2, milliseconds=3)),
(Micro, Timedelta(hours=2, microseconds=3)),
(Nano, Timedelta(hours=2, nanoseconds=3))]
for kls, expected in offsets:
offset = kls(3)
result = offset + Timedelta(hours=2)
self.assertTrue(isinstance(result, Timedelta))
self.assertEqual(result, expected)
def test_Hour(self):
assertEq(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1))
assertEq(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assertEq(2 * Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 2))
assertEq(-1 * Hour(), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
self.assertEqual(Hour(3) + Hour(2), Hour(5))
self.assertEqual(Hour(3) - Hour(2), Hour())
self.assertNotEqual(Hour(4), Hour(1))
def test_Minute(self):
assertEq(Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 1))
assertEq(Minute(-1), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 2))
assertEq(-1 * Minute(), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
self.assertEqual(Minute(3) + Minute(2), Minute(5))
self.assertEqual(Minute(3) - Minute(2), Minute())
self.assertNotEqual(Minute(5), Minute())
def test_Second(self):
assertEq(Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 1))
assertEq(Second(-1), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 2))
assertEq(
-1 * Second(), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1))
self.assertEqual(Second(3) + Second(2), Second(5))
self.assertEqual(Second(3) - Second(2), Second())
def test_Millisecond(self):
assertEq(Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1000))
assertEq(Milli(-1), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1))
assertEq(Milli(2), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000))
assertEq(2 * Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000))
assertEq(-1 * Milli(), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1))
self.assertEqual(Milli(3) + Milli(2), Milli(5))
self.assertEqual(Milli(3) - Milli(2), Milli())
def test_MillisecondTimestampArithmetic(self):
assertEq(Milli(), Timestamp('2010-01-01'), Timestamp('2010-01-01 00:00:00.001'))
assertEq(Milli(-1), Timestamp('2010-01-01 00:00:00.001'), Timestamp('2010-01-01'))
def test_Microsecond(self):
assertEq(Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1))
assertEq(Micro(-1), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2))
assertEq(-1 * Micro(), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1))
self.assertEqual(Micro(3) + Micro(2), Micro(5))
self.assertEqual(Micro(3) - Micro(2), Micro())
def test_NanosecondGeneric(self):
timestamp = Timestamp(datetime(2010, 1, 1))
self.assertEqual(timestamp.nanosecond, 0)
result = timestamp + Nano(10)
self.assertEqual(result.nanosecond, 10)
reverse_result = Nano(10) + timestamp
self.assertEqual(reverse_result.nanosecond, 10)
def test_Nanosecond(self):
timestamp = Timestamp(datetime(2010, 1, 1))
assertEq(Nano(), timestamp, timestamp + np.timedelta64(1, 'ns'))
assertEq(Nano(-1), timestamp + np.timedelta64(1, 'ns'), timestamp)
assertEq(2 * Nano(), timestamp, timestamp + np.timedelta64(2, 'ns'))
assertEq(-1 * Nano(), timestamp + np.timedelta64(1, 'ns'), timestamp)
self.assertEqual(Nano(3) + Nano(2), Nano(5))
self.assertEqual(Nano(3) - Nano(2), Nano())
# GH9284
self.assertEqual(Nano(1) + Nano(10), Nano(11))
self.assertEqual(Nano(5) + Micro(1), Nano(1005))
self.assertEqual(Micro(5) + Nano(1), Nano(5001))
def test_tick_zero(self):
for t1 in self.ticks:
for t2 in self.ticks:
self.assertEqual(t1(0), t2(0))
self.assertEqual(t1(0) + t2(0), t1(0))
if t1 is not Nano:
self.assertEqual(t1(2) + t2(0), t1(2))
if t1 is Nano:
self.assertEqual(t1(2) + Nano(0), t1(2))
def test_tick_equalities(self):
for t in self.ticks:
self.assertEqual(t(3), t(3))
self.assertEqual(t(), t(1))
# not equals
self.assertNotEqual(t(3), t(2))
self.assertNotEqual(t(3), t(-3))
def test_tick_operators(self):
for t in self.ticks:
self.assertEqual(t(3) + t(2), t(5))
self.assertEqual(t(3) - t(2), t(1))
self.assertEqual(t(800) + t(300), t(1100))
self.assertEqual(t(1000) - t(5), t(995))
def test_tick_offset(self):
for t in self.ticks:
self.assertFalse(t().isAnchored())
def test_compare_ticks(self):
for kls in self.ticks:
three = kls(3)
four = kls(4)
for _ in range(10):
self.assertTrue(three < kls(4))
self.assertTrue(kls(3) < four)
self.assertTrue(four > kls(3))
self.assertTrue(kls(4) > three)
self.assertTrue(kls(3) == kls(3))
self.assertTrue(kls(3) != kls(4))
class TestOffsetNames(tm.TestCase):
def test_get_offset_name(self):
assertRaisesRegexp(ValueError, 'Bad rule.*BusinessDays', get_offset_name, BDay(2))
assert get_offset_name(BDay()) == 'B'
assert get_offset_name(BMonthEnd()) == 'BM'
assert get_offset_name(Week(weekday=0)) == 'W-MON'
assert get_offset_name(Week(weekday=1)) == 'W-TUE'
assert get_offset_name(Week(weekday=2)) == 'W-WED'
assert get_offset_name(Week(weekday=3)) == 'W-THU'
assert get_offset_name(Week(weekday=4)) == 'W-FRI'
self.assertEqual(get_offset_name(LastWeekOfMonth(weekday=WeekDay.SUN)), "LWOM-SUN")
self.assertEqual(get_offset_name(makeFY5253LastOfMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=4)),"REQ-L-MAR-TUE-4")
self.assertEqual(get_offset_name(makeFY5253NearestEndMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=3)), "REQ-N-MAR-TUE-3")
def test_get_offset():
assertRaisesRegexp(ValueError, "rule.*GIBBERISH", get_offset, 'gibberish')
assertRaisesRegexp(ValueError, "rule.*QS-JAN-B", get_offset, 'QS-JAN-B')
pairs = [
('B', BDay()), ('b', BDay()), ('bm', BMonthEnd()),
('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)),
('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)),
('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4)),
("RE-N-DEC-MON", makeFY5253NearestEndMonth(weekday=0, startingMonth=12)),
("RE-L-DEC-TUE", makeFY5253LastOfMonth(weekday=1, startingMonth=12)),
("REQ-L-MAR-TUE-4", makeFY5253LastOfMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=4)),
("REQ-L-DEC-MON-3", makeFY5253LastOfMonthQuarter(weekday=0, startingMonth=12, qtr_with_extra_week=3)),
("REQ-N-DEC-MON-3", makeFY5253NearestEndMonthQuarter(weekday=0, startingMonth=12, qtr_with_extra_week=3)),
]
for name, expected in pairs:
offset = get_offset(name)
assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
(name, expected, offset))
def test_get_offset_legacy():
pairs = [('w@Sat', Week(weekday=5))]
for name, expected in pairs:
with tm.assert_produces_warning(FutureWarning):
offset = get_offset(name)
assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
(name, expected, offset))
class TestParseTimeString(tm.TestCase):
def test_parse_time_string(self):
(date, parsed, reso) = parse_time_string('4Q1984')
(date_lower, parsed_lower, reso_lower) = parse_time_string('4q1984')
self.assertEqual(date, date_lower)
self.assertEqual(parsed, parsed_lower)
self.assertEqual(reso, reso_lower)
def test_parse_time_quarter_w_dash(self):
# https://github.com/pydata/pandas/issue/9688
pairs = [
('1988-Q2', '1988Q2'),
('2Q-1988', '2Q1988'),
]
for dashed, normal in pairs:
(date_dash, parsed_dash, reso_dash) = parse_time_string(dashed)
(date, parsed, reso) = parse_time_string(normal)
self.assertEqual(date_dash, date)
self.assertEqual(parsed_dash, parsed)
self.assertEqual(reso_dash, reso)
self.assertRaises(DateParseError, parse_time_string, "-2Q1992")
self.assertRaises(DateParseError, parse_time_string, "2-Q1992")
self.assertRaises(DateParseError, parse_time_string, "4-4Q1992")
def test_get_standard_freq():
fstr = get_standard_freq('W')
assert fstr == get_standard_freq('w')
assert fstr == get_standard_freq('1w')
assert fstr == get_standard_freq(('W', 1))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = get_standard_freq('WeEk')
assert fstr == result
fstr = get_standard_freq('5Q')
assert fstr == get_standard_freq('5q')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = get_standard_freq('5QuarTer')
assert fstr == result
assert fstr == get_standard_freq(('q', 5))
def test_quarterly_dont_normalize():
date = datetime(2012, 3, 31, 5, 30)
offsets = (QuarterBegin, QuarterEnd, BQuarterEnd, BQuarterBegin)
for klass in offsets:
result = date + klass()
assert(result.time() == date.time())
class TestOffsetAliases(tm.TestCase):
def setUp(self):
_offset_map.clear()
def test_alias_equality(self):
for k, v in compat.iteritems(_offset_map):
if v is None:
continue
self.assertEqual(k, v.copy())
def test_rule_code(self):
lst = ['M', 'MS', 'BM', 'BMS', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
self.assertEqual(k, get_offset(k).rule_code)
# should be cached - this is kind of an internals test...
assert k in _offset_map
self.assertEqual(k, (get_offset(k) * 3).rule_code)
suffix_lst = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
base = 'W'
for v in suffix_lst:
alias = '-'.join([base, v])
self.assertEqual(alias, get_offset(alias).rule_code)
self.assertEqual(alias, (get_offset(alias) * 5).rule_code)
suffix_lst = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG',
'SEP', 'OCT', 'NOV', 'DEC']
base_lst = ['A', 'AS', 'BA', 'BAS', 'Q', 'QS', 'BQ', 'BQS']
for base in base_lst:
for v in suffix_lst:
alias = '-'.join([base, v])
self.assertEqual(alias, get_offset(alias).rule_code)
self.assertEqual(alias, (get_offset(alias) * 5).rule_code)
lst = ['M', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
code, stride = get_freq_code('3' + k)
self.assertTrue(isinstance(code, int))
self.assertEqual(stride, 3)
self.assertEqual(k, _get_freq_str(code))
def test_apply_ticks():
result = offsets.Hour(3).apply(offsets.Hour(4))
exp = offsets.Hour(7)
assert(result == exp)
def test_delta_to_tick():
delta = timedelta(3)
tick = offsets._delta_to_tick(delta)
assert(tick == offsets.Day(3))
def test_dateoffset_misc():
oset = offsets.DateOffset(months=2, days=4)
# it works
result = oset.freqstr
assert(not offsets.DateOffset(months=2) == 2)
def test_freq_offsets():
off = BDay(1, offset=timedelta(0, 1800))
assert(off.freqstr == 'B+30Min')
off = BDay(1, offset=timedelta(0, -1800))
assert(off.freqstr == 'B-30Min')
def get_all_subclasses(cls):
ret = set()
this_subclasses = cls.__subclasses__()
ret = ret | set(this_subclasses)
for this_subclass in this_subclasses:
        ret |= get_all_subclasses(this_subclass)
return ret
class TestCaching(tm.TestCase):
# as of GH 6479 (in 0.14.0), offset caching is turned off
# as of v0.12.0 only BusinessMonth/Quarter were actually caching
def setUp(self):
_daterange_cache.clear()
_offset_map.clear()
def run_X_index_creation(self, cls):
inst1 = cls()
if not inst1.isAnchored():
self.assertFalse(inst1._should_cache(), cls)
return
self.assertTrue(inst1._should_cache(), cls)
DatetimeIndex(start=datetime(2013,1,31), end=datetime(2013,3,31), freq=inst1, normalize=True)
self.assertTrue(cls() in _daterange_cache, cls)
def test_should_cache_month_end(self):
self.assertFalse(MonthEnd()._should_cache())
def test_should_cache_bmonth_end(self):
self.assertFalse(BusinessMonthEnd()._should_cache())
def test_should_cache_week_month(self):
self.assertFalse(WeekOfMonth(weekday=1, week=2)._should_cache())
def test_all_cacheableoffsets(self):
for subclass in get_all_subclasses(CacheableOffset):
if subclass.__name__[0] == "_" \
or subclass in TestCaching.no_simple_ctr:
continue
self.run_X_index_creation(subclass)
def test_month_end_index_creation(self):
DatetimeIndex(start=datetime(2013,1,31), end=datetime(2013,3,31), freq=MonthEnd(), normalize=True)
self.assertFalse(MonthEnd() in _daterange_cache)
def test_bmonth_end_index_creation(self):
DatetimeIndex(start=datetime(2013,1,31), end=datetime(2013,3,29), freq=BusinessMonthEnd(), normalize=True)
self.assertFalse(BusinessMonthEnd() in _daterange_cache)
def test_week_of_month_index_creation(self):
inst1 = WeekOfMonth(weekday=1, week=2)
DatetimeIndex(start=datetime(2013,1,31), end=datetime(2013,3,29), freq=inst1, normalize=True)
inst2 = WeekOfMonth(weekday=1, week=2)
self.assertFalse(inst2 in _daterange_cache)
class TestReprNames(tm.TestCase):
def test_str_for_named_is_name(self):
# look at all the amazing combinations!
month_prefixes = ['A', 'AS', 'BA', 'BAS', 'Q', 'BQ', 'BQS', 'QS']
names = [prefix + '-' + month for prefix in month_prefixes
for month in ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']]
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
names += ['W-' + day for day in days]
names += ['WOM-' + week + day for week in ('1', '2', '3', '4')
for day in days]
#singletons
names += ['S', 'T', 'U', 'BM', 'BMS', 'BQ', 'QS'] # No 'Q'
_offset_map.clear()
for name in names:
offset = get_offset(name)
self.assertEqual(repr(offset), name)
self.assertEqual(str(offset), name)
def get_utc_offset_hours(ts):
# take a Timestamp and compute total hours of utc offset
o = ts.utcoffset()
return (o.days * 24 * 3600 + o.seconds) / 3600.0
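# For instance, a US/Eastern timestamp observed during daylight saving time
# yields -4.0 here and -5.0 during standard time (values taken from the
# US/Eastern UTC offsets declared in TestDST below).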
class TestDST(tm.TestCase):
"""
test DateOffset additions over Daylight Savings Time
"""
# one microsecond before the DST transition
ts_pre_fallback = "2013-11-03 01:59:59.999999"
ts_pre_springfwd = "2013-03-10 01:59:59.999999"
# test both basic names and dateutil timezones
timezone_utc_offsets = {
'US/Eastern': dict(
utc_offset_daylight=-4,
utc_offset_standard=-5,
),
'dateutil/US/Pacific': dict(
utc_offset_daylight=-7,
utc_offset_standard=-8,
)
}
valid_date_offsets_singular = [
'weekday', 'day', 'hour', 'minute', 'second', 'microsecond'
]
valid_date_offsets_plural = [
'weeks', 'days',
'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'
]
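    # Note: singular kwargs (e.g. hour=2) set that datetime field absolutely,
    # while plural kwargs (e.g. hours=2) add to it -- this mirrors dateutil's
    # relativedelta semantics, which DateOffset builds on.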
def _test_all_offsets(self, n, **kwds):
valid_offsets = self.valid_date_offsets_plural if n > 1 \
else self.valid_date_offsets_singular
for name in valid_offsets:
self._test_offset(offset_name=name, offset_n=n, **kwds)
def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
offset = DateOffset(**{offset_name: offset_n})
t = tstart + offset
if expected_utc_offset is not None:
self.assertTrue(get_utc_offset_hours(t) == expected_utc_offset)
if offset_name == 'weeks':
# dates should match
self.assertTrue(
t.date() ==
timedelta(days=7 * offset.kwds['weeks']) + tstart.date()
)
# expect the same day of week, hour of day, minute, second, ...
self.assertTrue(
t.dayofweek == tstart.dayofweek and
t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second
)
elif offset_name == 'days':
# dates should match
self.assertTrue(timedelta(offset.kwds['days']) + tstart.date() == t.date())
# expect the same hour of day, minute, second, ...
self.assertTrue(
t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second
)
elif offset_name in self.valid_date_offsets_singular:
            # expect the singular offset value to match between tstart and t
datepart_offset = getattr(t, offset_name if offset_name != 'weekday' else 'dayofweek')
self.assertTrue(datepart_offset == offset.kwds[offset_name])
else:
# the offset should be the same as if it was done in UTC
self.assertTrue(
t == (tstart.tz_convert('UTC') + offset).tz_convert('US/Pacific')
)
def _make_timestamp(self, string, hrs_offset, tz):
offset_string = '{hrs:02d}00'.format(hrs=hrs_offset) if hrs_offset >= 0 else \
'-{hrs:02d}00'.format(hrs=-1 * hrs_offset)
return Timestamp(string + offset_string).tz_convert(tz)
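    # e.g. _make_timestamp("2013-11-03 01:59:59.999999", -4, "US/Eastern")
    # parses "2013-11-03 01:59:59.999999-0400" and then converts the result
    # to US/Eastern (illustrative values only).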
def test_fallback_plural(self):
"""test moving from daylight savings to standard time"""
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_daylight']
hrs_post = utc_offsets['utc_offset_standard']
self._test_all_offsets(
n=3,
tstart=self._make_timestamp(self.ts_pre_fallback, hrs_pre, tz),
expected_utc_offset=hrs_post
)
def test_springforward_plural(self):
"""test moving from standard to daylight savings"""
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
hrs_post = utc_offsets['utc_offset_daylight']
self._test_all_offsets(
n=3,
tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
expected_utc_offset=hrs_post
)
def test_fallback_singular(self):
        # in the case of singular offsets, we don't necessarily know which utc offset
# the new Timestamp will wind up in (the tz for 1 month may be different from 1 second)
# so we don't specify an expected_utc_offset
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(
n=1,
tstart=self._make_timestamp(self.ts_pre_fallback, hrs_pre, tz),
expected_utc_offset=None
)
def test_springforward_singular(self):
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(
n=1,
tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
expected_utc_offset=None
)
def test_all_offset_classes(self):
tests = {MonthBegin: ['11/2/2012', '12/1/2012'],
MonthEnd: ['11/2/2012', '11/30/2012'],
BMonthBegin: ['11/2/2012', '12/3/2012'],
BMonthEnd: ['11/2/2012', '11/30/2012'],
CBMonthBegin: ['11/2/2012', '12/3/2012'],
CBMonthEnd: ['11/2/2012', '11/30/2012'],
Week: ['11/2/2012', '11/9/2012'],
YearBegin: ['11/2/2012', '1/1/2013'],
YearEnd: ['11/2/2012', '12/31/2012'],
BYearBegin: ['11/2/2012', '1/1/2013'],
BYearEnd: ['11/2/2012', '12/31/2012'],
QuarterBegin: ['11/2/2012', '12/1/2012'],
QuarterEnd: ['11/2/2012', '12/31/2012'],
BQuarterBegin: ['11/2/2012', '12/3/2012'],
BQuarterEnd: ['11/2/2012', '12/31/2012'],
Day: ['11/4/2012', '11/4/2012 23:00']
}
for offset, test_values in iteritems(tests):
first = Timestamp(test_values[0], tz='US/Eastern') + offset()
second = Timestamp(test_values[1], tz='US/Eastern')
self.assertEqual(first, second, str(offset))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
artistic-2.0
|
bthirion/nipy
|
examples/labs/hierarchical_rois.py
|
4
|
2070
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = \
"""
Example of a script that creates a 'hierarchical roi' structure from the blob
model of an image
Needs matplotlib
Author: Bertrand Thirion, 2008-2009
"""
print(__doc__)
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
import nipy.labs.spatial_models.hroi as hroi
import nipy.labs.utils.simul_multisubject_fmri_dataset as simul
from nipy.labs.spatial_models.discrete_domain import domain_from_binary_array
##############################################################################
# simulate the data
shape = (60, 60)
pos = np.array([[12, 14], [20, 20], [30, 20]])
ampli = np.array([3, 4, 4])
dataset = simul.surrogate_2d_dataset(n_subj=1, shape=shape, pos=pos,
ampli=ampli, width=10.0).squeeze()
# create a domain descriptor associated with this
domain = domain_from_binary_array(dataset ** 2 > 0)
nroi = hroi.HROI_as_discrete_domain_blobs(domain, dataset.ravel(),
threshold=2., smin=5)
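# threshold is the blob-defining threshold on the map values and smin the
# minimal blob size; the values above are illustrative for this simulated map
# (interpretation of the hroi arguments assumed from their names).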
n1 = nroi.copy()
nroi.reduce_to_leaves()
td = n1.make_forest().depth_from_leaves()
root = np.argmax(td)
lv = n1.make_forest().get_descendants(root)
u = nroi.make_graph().cc()
flat_data = dataset.ravel()
activation = [flat_data[nroi.select_id(id, roi=False)]
for id in nroi.get_id()]
nroi.set_feature('activation', activation)
label = np.reshape(n1.label, shape)
label_ = np.reshape(nroi.label, shape)
# make a figure
plt.figure(figsize=(10, 4))
plt.subplot(1, 3, 1)
plt.imshow(np.squeeze(dataset))
plt.title('Input map')
plt.axis('off')
plt.subplot(1, 3, 2)
plt.title('Nested Rois')
plt.imshow(label, interpolation='Nearest')
plt.axis('off')
plt.subplot(1, 3, 3)
plt.title('Leaf Rois')
plt.imshow(label_, interpolation='Nearest')
plt.axis('off')
plt.show()
|
bsd-3-clause
|
kelseyoo14/Wander
|
venv_2_7/lib/python2.7/site-packages/pandas/tseries/plotting.py
|
9
|
9293
|
"""
Period formatters and locators adapted from scikits.timeseries by
Pierre GF Gerard-Marchant & Matt Knox
"""
#!!! TODO: Use the fact that axis can have units to simplify the process
import numpy as np
from matplotlib import pylab
from pandas.tseries.period import Period
from pandas.tseries.offsets import DateOffset
import pandas.tseries.frequencies as frequencies
from pandas.tseries.index import DatetimeIndex
import pandas.core.common as com
import pandas.compat as compat
from pandas.tseries.converter import (TimeSeries_DateLocator,
TimeSeries_DateFormatter)
#----------------------------------------------------------------------
# Plotting functions and monkey patches
def tsplot(series, plotf, ax=None, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
Parameters
----------
    series : Series
    plotf : callable
        plotting function, called as ``plotf(ax, x, y, **kwargs)``
    ax : Axes, optional
        defaults to the current axes
    Notes
    -----
Supports same kwargs as Axes.plot
"""
    # Use inferred freq if possible; need a test case for inferred
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
freq, series = _maybe_resample(series, ax, kwargs)
# Set ax with freq info
_decorate_axes(ax, freq, kwargs)
ax._plot_data.append((series, plotf, kwargs))
lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq)
return lines
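# Usage sketch (not part of the original module; assumes a Series `s` with a
# DatetimeIndex or PeriodIndex carrying frequency information):
#   import matplotlib.pyplot as plt
#   ax = plt.gca()
#   lines = tsplot(s, lambda ax, x, y, **kw: ax.plot(x, y, **kw), ax=ax)
# plotf is called as plotf(ax, x, y, **kwargs), hence the small wrapper.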
def _maybe_resample(series, ax, kwargs):
# resample against axes freq if necessary
freq, ax_freq = _get_freq(ax, series)
if freq is None: # pragma: no cover
raise ValueError('Cannot use dynamic axis without frequency info')
# Convert DatetimeIndex to PeriodIndex
if isinstance(series.index, DatetimeIndex):
series = series.to_period(freq=freq)
if ax_freq is not None and freq != ax_freq:
if frequencies.is_superperiod(freq, ax_freq): # upsample input
series = series.copy()
series.index = series.index.asfreq(ax_freq, how='s')
freq = ax_freq
elif _is_sup(freq, ax_freq): # one is weekly
how = kwargs.pop('how', 'last')
series = series.resample('D', how=how).dropna()
series = series.resample(ax_freq, how=how).dropna()
freq = ax_freq
elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):
_upsample_others(ax, freq, kwargs)
ax_freq = freq
else: # pragma: no cover
raise ValueError('Incompatible frequency conversion')
return freq, series
def _is_sub(f1, f2):
return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_subperiod(f1, 'D')))
def _is_sup(f1, f2):
return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))
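# Weekly frequencies need the special cases above because is_subperiod /
# is_superperiod do not relate 'W-*' rules directly to the other rules, so the
# comparison is routed through the daily frequency 'D' (rationale inferred
# from the code, not documented upstream).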
def _upsample_others(ax, freq, kwargs):
legend = ax.get_legend()
lines, labels = _replot_ax(ax, freq, kwargs)
_replot_ax(ax, freq, kwargs)
other_ax = None
if hasattr(ax, 'left_ax'):
other_ax = ax.left_ax
if hasattr(ax, 'right_ax'):
other_ax = ax.right_ax
if other_ax is not None:
rlines, rlabels = _replot_ax(other_ax, freq, kwargs)
lines.extend(rlines)
labels.extend(rlabels)
if (legend is not None and kwargs.get('legend', True) and
len(lines) > 0):
title = legend.get_title().get_text()
if title == 'None':
title = None
ax.legend(lines, labels, loc='best', title=title)
def _replot_ax(ax, freq, kwargs):
data = getattr(ax, '_plot_data', None)
# clear current axes and data
ax._plot_data = []
ax.clear()
_decorate_axes(ax, freq, kwargs)
lines = []
labels = []
if data is not None:
for series, plotf, kwds in data:
series = series.copy()
idx = series.index.asfreq(freq, how='S')
series.index = idx
ax._plot_data.append((series, plotf, kwds))
# for tsplot
if isinstance(plotf, compat.string_types):
from pandas.tools.plotting import _plot_klass
plotf = _plot_klass[plotf]._plot
lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0])
labels.append(com.pprint_thing(series.name))
return lines, labels
def _decorate_axes(ax, freq, kwargs):
"""Initialize axes for time-series plotting"""
if not hasattr(ax, '_plot_data'):
ax._plot_data = []
ax.freq = freq
xaxis = ax.get_xaxis()
xaxis.freq = freq
if not hasattr(ax, 'legendlabels'):
ax.legendlabels = [kwargs.get('label', None)]
else:
ax.legendlabels.append(kwargs.get('label', None))
ax.view_interval = None
ax.date_axis_info = None
def _get_freq(ax, series):
# get frequency from data
freq = getattr(series.index, 'freq', None)
if freq is None:
freq = getattr(series.index, 'inferred_freq', None)
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
# use axes freq if no data freq
if freq is None:
freq = ax_freq
# get the period frequency
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq, ax_freq
def _use_dynamic_x(ax, data):
freq = _get_index_freq(data)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
if freq is None:
return False
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
if freq is None:
return False
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
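# Summary: a dynamic (period-based) x-axis is only used when a frequency can be
# resolved from the data or the axes, and -- for a DatetimeIndex -- when the
# first point lines up with its period boundary; irregular data falls back to
# ordinary datetime plotting.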
def _get_index_freq(data):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _maybe_convert_index(ax, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data = data.to_period(freq=freq)
return data
# Patch methods for subplot. Only format_dateaxis is currently used.
# Do we need the rest for convenience?
def format_dateaxis(subplot, freq):
"""
Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks.
"""
majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_locator(majlocator)
subplot.xaxis.set_minor_locator(minlocator)
majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_formatter(majformatter)
subplot.xaxis.set_minor_formatter(minformatter)
# x and y coord info
subplot.format_coord = lambda t, y: ("t = {0} "
"y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y))
pylab.draw_if_interactive()
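# Typical call, as used by tsplot above: format_dateaxis(ax, ax.freq) after the
# series has been drawn on `ax`.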
|
artistic-2.0
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py
|
18
|
26105
|
"""
An experimental support for curvilinear grid.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
"""
Helper class for a fixed axis.
"""
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FixedAxisArtistHelper, self).__init__( \
loc=side,
)
self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = self.nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.side = side
self._limits_inverted = False
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
if self.nth_coord == 0:
xy1, xy2 = axes.get_ylim()
else:
xy1, xy2 = axes.get_xlim()
if xy1 > xy2:
self._limits_inverted = True
else:
self._limits_inverted = False
def change_tick_coord(self, coord_number=None):
if coord_number is None:
self.nth_coord_ticks = 1 - self.nth_coord_ticks
elif coord_number in [0, 1]:
self.nth_coord_ticks = coord_number
else:
raise Exception("wrong coord number")
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
g = self.grid_helper
if self._limits_inverted:
side = {"left":"right","right":"left",
"top":"bottom", "bottom":"top"}[self.side]
else:
side = self.side
ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
#ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FloatingAxisArtistHelper, self).__init__(nth_coord,
value,
)
self.value = value
self.grid_helper = grid_helper
self._extremes = None, None
self._get_line_path = None # a method that returns a Path.
self._line_num_points = 100 # number of points to create a line
def set_extremes(self, e1, e2):
self._extremes = e1, e2
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
grid_finder = self.grid_helper.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
extremes = list(extremes)
e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
if e1 is not None:
extremes[2] = max(e1, extremes[2])
if e2 is not None:
extremes[3] = min(e2, extremes[3])
elif self.nth_coord == 1:
if e1 is not None:
extremes[0] = max(e1, extremes[0])
if e2 is not None:
extremes[1] = min(e2, extremes[1])
grid_info = dict()
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
grid_finder = self.grid_helper.grid_finder
#e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
xx0 = np.linspace(self.value, self.value, self._line_num_points)
yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
elif self.nth_coord == 1:
xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
yy0 = np.linspace(self.value, self.value, self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
grid_info["line_xy"] = xx, yy
self.grid_info = grid_info
def get_axislabel_transform(self, axes):
return Affine2D() #axes.transData
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lat_levs = np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.01 / lat_factor
else:
yy0 = lat_levs
dy = 0.01
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs = np.asarray(lon_levs)
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.01 / lon_factor
else:
xx0 = lon_levs
dx = 0.01
if None in self._extremes:
e0, e1 = self._extremes
else:
e0, e1 = sorted(self._extremes)
if e0 is None:
e0 = -np.inf
if e1 is None:
e1 = np.inf
if self.nth_coord == 0:
mask = (e0 <= yy0) & (yy0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (e0 <= xx0) & (xx0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
xx2a, yy2a = transform_xy(xx0, yy0)
xx2b, yy2b = transform_xy(xx0, yy0+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx1a, yy1a = transform_xy(xx0, yy0)
xx1b, yy1b = transform_xy(xx0, yy0+dy)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
            mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
#dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
#mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
#dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
yield [x, y], d1, d2, lab
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
x, y = self.grid_info["line_xy"]
if self._get_line_path is None:
return Path(list(zip(x, y)))
else:
return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
aux_trans : a transform from the source (curved) coordinate to
target (rectilinear) coordinate. An instance of MPL's Transform
(inverse transform should be defined) or a tuple of two callable
        objects which define the transform and its inverse. The callables
        need to take two arguments (arrays of source coordinates) and
        should return the two arrays of target coordinates:
        e.g., x2, y2 = trans(x1, y1)
"""
super(GridHelperCurveLinear, self).__init__()
self.grid_info = None
self._old_values = None
#self._grid_params = dict()
self._aux_trans = aux_trans
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
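    # Usage sketch (mirrors matplotlib's curvelinear-grid demos): aux_trans may
    # be a pair of callables instead of a Transform, e.g.
    #   def tr(x, y):
    #       x, y = np.asarray(x), np.asarray(y)
    #       return x, y - x
    #   def inv_tr(x, y):
    #       x, y = np.asarray(x), np.asarray(y)
    #       return x, y + x
    #   grid_helper = GridHelperCurveLinear((tr, inv_tr))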
def update_grid_finder(self, aux_trans=None, **kw):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kw)
self.invalidate()
def _update(self, x1, x2, y1, y2):
"bbox in 0-based image coordinates"
# update wcsgrid
if self.valid() and self._old_values == (x1, x2, y1, y2):
return
self._update_grid(x1, y1, x2, y2)
self._old_values = (x1, x2, y1, y2)
self._force_update = False
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
#nth_coord,
nth_coord_ticks=nth_coord,
)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
return axisline
def new_floating_axis(self, nth_coord,
value,
axes=None,
axis_direction="bottom"
):
if axes is None:
axes = self.axes
_helper = FloatingAxisArtistHelper( \
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
#_helper = FloatingAxisArtistHelper(self, nth_coord,
# value,
# label_direction=label_direction,
# )
#axisline = AxisArtistFloating(axes, _helper,
# axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
#axisline.major_ticklabels.set_visible(True)
#axisline.minor_ticklabels.set_visible(False)
#axisline.major_ticklabels.set_rotate_along_line(True)
#axisline.set_rotate_label_along_line(True)
return axisline
def _update_grid(self, x1, y1, x2, y2):
self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
for gl in self.grid_info["lon"]["lines"]:
grid_lines.extend(gl)
if axis in ["both", "y"]:
for gl in self.grid_info["lat"]["lines"]:
grid_lines.extend(gl)
return grid_lines
def get_tick_iterator(self, nth_coord, axis_side, minor=False):
#axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
#angle = [0, 90, 180, 270][axisnr]
lon_or_lat = ["lon", "lat"][nth_coord]
if not minor: # major ticks
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, l
else:
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, ""
#for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
# yield xy, a, ""
return f()
def test3():
import numpy as np
from matplotlib.transforms import Transform
from matplotlib.path import Path
class MyTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new test transform (a simple shear). Resolution is the number
of steps to interpolate between each input line segment to approximate
its path in the curved target space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y-x), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MyTransformInv(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class MyTransformInv(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y+x), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MyTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot_class_factory
from .axislines import Axes
SubplotHost = host_subplot_class_factory(Axes)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
ax1.parasites.append(ax2)
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
ax1.set_xlim(0, 10)
ax1.set_ylim(0, 10)
ax1.grid(True)
plt.draw()
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
ParasiteAxesAuxTrans
import matplotlib.cbook as cbook
# PolarAxes.PolarTransform takes radians, but we want our coordinate
# system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# The polar projection involves a cycle and also has limits in its
# coordinates, so it needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along the x and y directions
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(5)
# Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
# And also use an appropriate formatter. Note that the acceptable
# Locator and Formatter classes are a bit different from mpl's, and
# you cannot directly use mpl's Locator and Formatter here (but that
# may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
# let the right axis show ticklabels for the 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
# let the bottom axis show ticklabels for the 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
axis.label.set_text("Test")
axis.label.set_visible(True)
#axis._extremes = 2, 10
#axis.label.set_text("Test")
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.get_helper()._extremes=2, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.label.set_text("Test 2")
axis.get_helper()._extremes=-180, 90
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
# Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
# PolarAxes.PolarTransform takes radians, but we want our coordinate
# system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# The polar projection involves a cycle and also has limits in its
# coordinates, so it needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along the x and y directions
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
# Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
# And also use an appropriate formatter. Note that the acceptable
# Locator and Formatter classes are a bit different from mpl's, and
# you cannot directly use mpl's Locator and Formatter here (but that
# may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
for axis in list(six.itervalues(ax1.axis)):
axis.set_visible(False)
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
axes=ax1,
axis_direction="left"
)
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
grid_helper = ax1.get_grid_helper()
ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
axis_direction="right")
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
axes=ax1,
axis_direction="bottom")
axis.label.set_text("Test 2")
axis.get_helper()._extremes= 50, 130
axis.major_ticklabels.set_axis_direction("top")
axis.label.set_axis_direction("top")
grid_helper.grid_finder.grid_locator1.den = 5
grid_helper.grid_finder.grid_locator2._nbins = 5
# # A parasite axes with given transform
# ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# # note that ax2.transData == tr + ax1.transData
# # Anything you draw in ax2 will match the ticks and grids of ax1.
# ax1.parasites.append(ax2)
# intp = cbook.simple_linear_interpolation
# ax2.plot(intp(np.array([0, 30]), 50),
# intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test3()
#curvelinear_test2(fig)
curvelinear_test3(fig)
#plt.draw()
plt.show()
|
gpl-3.0
|
gallantlab/pycortex
|
examples/quickstart/plot_retinotopy_flatmap.py
|
1
|
1424
|
"""
================================
Plot Example Retinotopy Flatmaps
================================
This demo shows how to plot example retinotopy data onto a subject's brain
on a flatmap. In order for this demo to work, you need to download this
dataset_; the `urllib` call included below does that automatically.
.. _dataset: http://gallantlab.org/pycortex/S1_retinotopy.hdf
S1 is the example subject that comes with pycortex. If you want to plot
data onto a different subject, that subject must be in your filestore,
and you will also need a flatmap for it.
"""
import six
import cortex
import matplotlib.pyplot as plt
if six.PY2:
from urllib import urlretrieve
elif six.PY3:
from urllib.request import urlretrieve
# Download the dataset and load it
_ = urlretrieve("http://gallantlab.org/pycortex/S1_retinotopy.hdf",
"S1_retinotopy.hdf")
ret_data = cortex.load("S1_retinotopy.hdf")
# The retinotopy data has to be divided into left and right hemispheres
left_data = ret_data.angle_left
cortex.quickshow(left_data, with_curvature=True,
curvature_contrast=0.5,
curvature_brightness=0.5,
curvature_threshold=True)
plt.show()
right_data = ret_data.angle_right
cortex.quickshow(right_data, with_curvature=True,
curvature_contrast=0.5,
curvature_brightness=0.5,
curvature_threshold=True)
plt.show()
|
bsd-2-clause
|
liberatorqjw/scikit-learn
|
examples/classification/plot_lda_qda.py
|
17
|
4794
|
"""
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA()
y_pred = lda.fit(X, y, store_covariance=True).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
|
bsd-3-clause
|
boltunoff/turo
|
turo_scraping/genr_turo_parse.py
|
1
|
13076
|
from selenium import webdriver
from bs4 import BeautifulSoup
import time
import datetime
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from random import choice
import os.path
import logging
import pandas as pd
#add generic log file location and name
logging.basicConfig(filename='turotask_minivans.log', filemode="w", level=logging.INFO,format='%(asctime)s %(message)s')
#Logging usage example:
#logging.debug("This is a debug message")
#logging.info("Informational message")
#logging.error("An error has happened!")
logging.info("Job started")
def driver_set():
ua_list = [
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/48.0.2564.82 Chrome/48.0.2564.82 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36",
"Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36"
]
dcap = dict(DesiredCapabilities.PHANTOMJS, javascriptEnabled=True)
dcap["phantomjs.page.settings.resourceTimeout"] = 15
dcap["phantomjs.page.settings.loadImages"] = True
dcap["phantomjs.page.settings.userAgent"] = choice(ua_list)
driver = webdriver.PhantomJS(desired_capabilities=dcap) # PhantomJs should be in the same dir of python.py file within project
driver.set_window_size(1920,1080)
return driver
driver = driver_set()
# car_types: SUVS, MINIVANS, CARS.
# to search only for regular cars (excluding suvs and minivans), a filter needs to be applied on the web page
# instead of just specifying a url with /cars...
# add url and html objects for /cars only search results
def navigate_to_base_url(car_type, city):
url = "https://turo.com/rentals/%s" % car_type
driver.get(url)
driver.implicitly_wait(3)
time.sleep(5)
#click input search form:
driver.find_element_by_xpath('//*[@id="pageContainer-content"]/div[1]/div[1]/div[2]/form/div[1]/input[1]').click()
driver.implicitly_wait(3)
time.sleep(5)
#enter text into search box for location:
driver.find_element_by_xpath('//*[@id="pageContainer-content"]/div[1]/div[1]/div[2]/form/div[1]/input[1]').send_keys(city)
driver.implicitly_wait(3) #also search by Chicago keyword, more cars available
time.sleep(5)
#click search button in Ohare for a week ahead
try:
driver.find_element_by_xpath('//*[@id="pageContainer-content"]/div[1]/div[1]/div[2]/form/div[4]/button').click()
driver.implicitly_wait(3)
time.sleep(5)
except Exception as e:
driver.save_screenshot('button.png')
raise
driver.implicitly_wait(3)
time.sleep(5)
url_now = driver.current_url # URL for Current week search
print(url_now)
return url_now
# parse current URL to find current search dates and update search dates for weeks ahead and for 3 days search
# update current URL with new start/end dates +7 days; +3 days
#all_cars[0].prettify()
#https://turo.com/search?type=10&location=Chicago%2C%20IL%2C%20USA&country=US®ion=IL&locationType=City&models=&makes=&fromYear=0&startDate=06%2F27%2F2017&startTime=10%3A00&endDate=07%2F04%2F2017&endTime=10%3A00&category=ALL&instantBook=false&customDelivery=false&maximumDistanceInMiles=30&sortType=RELEVANCE&isMapSearch=false&latitude=41.8781136&longitude=-87.6297982&defaultZoomLevel=11
#TODO: consider user input as: Type of Car(minivans, suvs, cars, trucks), City, Start Date and End Date.
# ??? use above url as a base having user inputs as a parameters...?
url_now = navigate_to_base_url('minivans', 'Chicago')
logging.info("Cleaning and parsing dates from URL")
def cln_dates(url_now):
url_lst = url_now.split("&")
for s in url_lst:
if 'startDate' in s:
startDate = s
elif 'endDate' in s:
endDate = s
start_list = startDate.split('=')
start_date_str = start_list[1].replace("2F", "-").split("%")
start_dt = "".join(start_date_str)
end_list = endDate.split('=')
end_date_str = end_list[1].replace("2F", "-").split("%")
end_dt = "".join(end_date_str)
return start_dt, end_dt
#(u'06-12-2017', u'06-19-2017') strings
#TODO convert to date format, add 7 or 3 days, and create a new url with them
#startDate=05%2F11%2F2017 and endDate=05%2F18%2F2017
# a = u'06-27-2017'
# a1 = datetime.datetime.strptime(a,"%m-%d-%Y")
# a17 = a1 + datetime.timedelta(days=7)
# a17s = datetime.datetime.strftime(a17,"%m-%d-%Y")
def parse_data(): # add argument url_now
#driver = driver_set()
soup = BeautifulSoup(driver.page_source, "html.parser")
#driver.close()
#use the hierarchical nature of the HTML structure to grab precisely the content that I am interested in
# I will grab all of the elements that are within "li" tags and are also members of class "u-baseBottomMargin"
all_cars = soup.find_all('li', {'class': 'u-baseBottomMargin'})
# //*[@id="pageContainer-content"]/div[3]/div[3]/div[2]/div/div[2]).get_get
#print "Example of the 1st element with text of HTML soup object: \n", all_cars[0]
#all_cars[0].span.get_text()
hrefs_list = [] # parse link to find car ID
car_id_list = []
year_list = []
make_list = [] # Make Model
for i in all_cars: # trying to combine all data elements in one loop and to add to dict
hrefs_list.append(i.a["href"])
href = i.a["href"]
car_id = href.split("/")[6][:6] # 6th element of the link, and 6th element of ID node /654321/
car_id_list.append(car_id)
year = i.span.get_text()
year_list.append(i.span.get_text())
model = i.p.get_text()
make_list.append(i.p.get_text())
prices = soup.find_all('p', {'class': 'vehicleWithDetails-value'}) # different element for prices, couldn't get_text from all_cars
price_list = [] # Prices
for i in prices:
price_list.append(i.get_text())
logging.info("Prices conversion to floats started")
price_list_fl = [float(e) for e in price_list] #convert unicode to floats
#print "Minimum price for today: ", min(price_list_fl)
#print "Maximum price for today: ", max(price_list_fl)
#print "Average price for today: ", sum(price_list_fl)/float(len(price_list_fl)) #resolve unicode
publn_dt = time.strftime("%m/%d/%Y %H:%M:%S")
print("Search on: ", publn_dt)
logging.info("Found %d prices for the cars", len(price_list_fl))
search_start_dt, search_end_dt = cln_dates(url_now) #calling function to get dates
data = {
'search_start_dt': search_start_dt,
'search_end_dt':search_end_dt,
'publn_dt' : publn_dt,
'links' : hrefs_list,
'car_id' : car_id_list,
"year" : year_list,
'make' : make_list,
'price' : price_list_fl
}
df = pd.DataFrame(data)
df = df[['car_id','links','year','make','price','search_start_dt','search_end_dt','publn_dt']] #changing order of DF
print(df)
return df
fname = 'turo_minivans_data.csv'
import logging
# add filemode="w" to overwrite
#logging.basicConfig(filename="turo_parse3.log", filemode="w", level=logging.INFO)
#logging.debug("This is a debug message")
#logging.info("Informational message")
#logging.error("An error has happened!")
df = parse_data()
import os.path
logging.info("Writing data to CSV file... %s " % fname)
def write_file(df):
try:
if os.path.exists(fname): #todo may need to check if the load for the same date exists?
with open(fname, 'a') as f:
df.to_csv(f, header=False, index=False)
print(len(df), "records written to CSV file %s" % fname)
else:
with open(fname, 'a') as f:
df.to_csv(f, header=True, index=False)
print(len(df), "records written to CSV file %s" % fname)
except IOError:
logging.error("Can't open %s. Please check if the %s is now open" %(fname,fname))
logging.info("Nothing is written to the file")
data = write_file(df)
print('Job is done for one iteration')
# calculating future dates from the url_now
s, e = cln_dates(url_now)
s_dt = datetime.datetime.strptime(s, "%m-%d-%Y") #convert to date format from string
e_dt_in6m = s_dt + datetime.timedelta(days=7*26) # end of the search window: 6 months (26 weeks) after the start date; a datetime so it can be compared inside timespan()
def timespan(s_dt, e_dt_in6m, delta=datetime.timedelta(days=7)): #returns dates for 6 months ahead
curr_dt_plus7 = s_dt + datetime.timedelta(days = 7) # first search date for a week ahead
while curr_dt_plus7 < e_dt_in6m:
yield curr_dt_plus7
curr_dt_plus7 += delta
#for day in timespan(s_dt, e_dt_in6m, delta=datetime.timedelta(days=7)):
#url_coll =
# print day
def date_repl_url():
    # collect URL-encoded start dates for each week ahead; substituting them back
    # into the search URL is still TODO (see future_url below)
    url_coll = []
    for day in timespan(s_dt, e_dt_in6m, delta=datetime.timedelta(days=7)):
        s_dt_str = datetime.datetime.strftime(day, '%m-%d-%Y')
        s_dt_str_url = s_dt_str.replace('-', '%2F')
        url_coll.append(s_dt_str_url)
    return url_coll
def future_url():
# 1. add 7 days to start and end dt
# 2. substitute dates in URL
# 3. run all the routine again.
# convert Dates like this:
# startDate=05%2F11%2F2017 and endDate=05%2F18%2F2017
# a = u'06-27-2017'
# a1 = datetime.datetime.strptime(a,"%m-%d-%Y")
# a17 = a1 + datetime.timedelta(days=7)
# a17s = datetime.datetime.strftime(a17,"%m-%d-%Y")
pass
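# A possible sketch for the TODO above (not the author's implementation): shift the
# startDate/endDate query parameters of an existing search URL by a given number of
# days. The parameter names and the %m/%d/%Y date format come from the URLs shown in
# the comments above; the helper name and the use of Python 3's urllib.parse are
# assumptions.
def shift_url_dates(url, days):
    import datetime
    from urllib.parse import urlsplit, urlunsplit, parse_qs, urlencode
    parts = urlsplit(url)
    params = parse_qs(parts.query)  # decodes %2F back to "/" in the date values
    for key in ("startDate", "endDate"):
        old_dt = datetime.datetime.strptime(params[key][0], "%m/%d/%Y")
        new_dt = old_dt + datetime.timedelta(days=days)
        params[key] = [new_dt.strftime("%m/%d/%Y")]
    # urlencode re-quotes "/" as %2F, matching the original URL style
    return urlunsplit((parts.scheme, parts.netloc, parts.path,
                       urlencode(params, doseq=True), parts.fragment))
# e.g., url_in_a_week = shift_url_dates(url_now, 7)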
# good example: https://github.com/fankcoder/findtrip/blob/master/findtrip/findtrip/spiders/spider_ctrip.py
# https://github.com/ianibo/SirsiDynixIBistroScraper/blob/master/scraper.py
#TODO: search on diff timelines: a week ahead, a month ahead, 6 months ahead
#TODO: when future dates function is ready, decouple main functions from iterations(each week, each car type, etc),
# create logs for each iteration.
# TODO: stopped working...
# change how ContentContainer is searched:
# url = "https://turo.com/search?country=US&defaultZoomLevel=11&endDate=12%2F21%2F2018&endTime=10%3A00&international=true&isMapSearch=false&itemsPerPage=200&latitude=41.8781136&location=Chicago%2C%20IL%2C%20USA&locationType=City&longitude=-87.6297982&maximumDistanceInMiles=30®ion=IL&sortType=RELEVANCE&startDate=12%2F18%2F2018&startTime=10%3A00"
# driver.get(url)
# soup = BeautifulSoup(driver.page_source,"html.parser")
# pageContainercontent = soup.find_all('div', attrs={'id': 'pageContainer-content'})
#
# Python 3.6 use new library to render JS:
# https://html.python-requests.org/
# from requests_html import HTMLSession
# session = HTMLSession()
# r = session.get(url)
#r.html.render(sleep=10)
# r.html.render(sleep=10)
#//*[@id="pageContainer-content"]/div[3]/div/div/div[2]/div/div[1]/div/div/div[1]/div/div[1]/div/div/a
# New search
# 'https://turo.com/rentals/minivans'
# driver.find_element_by_xpath('//*[@id="search-input-header"]').click() # top search bar activating
# driver.find_element_by_xpath('//*[@id="search-input-header"]').send_keys('chicago') # top search bar typed city/location
# driver.find_element_by_xpath('//*[@id="pageContainer"]/header/div/div[1]/form/span[1]/button').click() # top bar search button
# driver.current_url:
# 'https://turo.com/search?location=chicago&country=®ion=&locationType='
##############
##############
# driver.execute() - running JavaScript
# driver.get(url)
# searchbar_el = driver.find_element_by_xpath('//*[@id="search-input-header"]') # found element with JavaScript
# driver.execute_script("arguments[0].value='Chicago';", searchbar_el) # passing Chicago to JavaScript, running JS
url = 'https://turo.com/'
driver.get(url)
search_form_el = driver.find_element_by_xpath('//*[@id="js-searchFormExpandedLocationInput"]') # found element with JavaScript
driver.save_screenshot('chgo_srch.png')
driver.execute_script("arguments[0].value='Chicago';", search_form_el) # passing Chicago to JavaScript, running JS
driver.save_screenshot('chgo_srch.png')
driver.find_element_by_xpath('//*[@id="js-searchFormExpanded"]/button[2]').click() # click search button
driver.save_screenshot('chgo_srch.png')
driver.current_url
# 'https://turo.com/search?location=Chicago&country=®ion=&locationType=&startDate=11%2F12%2F2018&startTime=10%3A00&endDate=11%2F19%2F2018&endTime=10%3A00'
##### from this url drilling down to search results:
db_srch_el = driver.find_element_by_xpath('//*[@id="pageContainer"]')
pageContainercontent = db_srch_el.find_element_by_xpath('//*[@id="pageContainer-content"]')
### !!! stuck here on rework.. can't find links to cars from pageContainer-content :(
# try... chromdriver - it renders javascript ?
# try adding library: from requests_html import HTMLSession to get all the links for the cars...?
|
gpl-3.0
|
pjryan126/solid-start-careers
|
store/api/zillow/venv/lib/python2.7/site-packages/pandas/tests/series/test_datetime_values.py
|
1
|
16749
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, bdate_range,
date_range, period_range, timedelta_range)
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import Timestamp, DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
import pandas.core.common as com
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesDatetimeValues(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_dt_namespace_accessor(self):
# GH 7207
# test .dt namespace accessor
ok_for_base = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday',
'dayofyear', 'quarter', 'freq', 'days_in_month',
'daysinmonth']
ok_for_period = ok_for_base + ['qyear', 'start_time', 'end_time']
ok_for_period_methods = ['strftime', 'to_timestamp', 'asfreq']
ok_for_dt = ok_for_base + ['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end',
'is_year_start', 'is_year_end', 'tz']
ok_for_dt_methods = ['to_period', 'to_pydatetime', 'tz_localize',
'tz_convert', 'normalize', 'strftime', 'round',
'floor', 'ceil']
ok_for_td = ['days', 'seconds', 'microseconds', 'nanoseconds']
ok_for_td_methods = ['components', 'to_pytimedelta', 'total_seconds',
'round', 'floor', 'ceil']
def get_expected(s, name):
result = getattr(Index(s._values), prop)
if isinstance(result, np.ndarray):
if com.is_integer_dtype(result):
result = result.astype('int64')
elif not com.is_list_like(result):
return result
return Series(result, index=s.index, name=s.name)
def compare(s, name):
a = getattr(s.dt, prop)
b = get_expected(s, prop)
if not (com.is_list_like(a) and com.is_list_like(b)):
self.assertEqual(a, b)
else:
tm.assert_series_equal(a, b)
# datetimeindex
cases = [Series(date_range('20130101', periods=5), name='xxx'),
Series(date_range('20130101', periods=5, freq='s'),
name='xxx'),
Series(date_range('20130101 00:00:00', periods=5, freq='ms'),
name='xxx')]
for s in cases:
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt, prop)
result = s.dt.to_pydatetime()
self.assertIsInstance(result, np.ndarray)
self.assertTrue(result.dtype == object)
result = s.dt.tz_localize('US/Eastern')
exp_values = DatetimeIndex(s.values).tz_localize('US/Eastern')
expected = Series(exp_values, index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
self.assertEqual(str(tz_result), 'US/Eastern')
freq_result = s.dt.freq
self.assertEqual(freq_result, DatetimeIndex(s.values,
freq='infer').freq)
# let's localize, then convert
result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
exp_values = (DatetimeIndex(s.values).tz_localize('UTC')
.tz_convert('US/Eastern'))
expected = Series(exp_values, index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
# round
s = Series(pd.to_datetime(['2012-01-01 13:00:00',
'2012-01-01 12:01:00',
'2012-01-01 08:00:00']), name='xxx')
result = s.dt.round('D')
expected = Series(pd.to_datetime(['2012-01-02', '2012-01-02',
'2012-01-01']), name='xxx')
tm.assert_series_equal(result, expected)
# round with tz
result = (s.dt.tz_localize('UTC')
.dt.tz_convert('US/Eastern')
.dt.round('D'))
exp_values = pd.to_datetime(['2012-01-01', '2012-01-01',
'2012-01-01']).tz_localize('US/Eastern')
expected = Series(exp_values, name='xxx')
tm.assert_series_equal(result, expected)
# floor
s = Series(pd.to_datetime(['2012-01-01 13:00:00',
'2012-01-01 12:01:00',
'2012-01-01 08:00:00']), name='xxx')
result = s.dt.floor('D')
expected = Series(pd.to_datetime(['2012-01-01', '2012-01-01',
'2012-01-01']), name='xxx')
tm.assert_series_equal(result, expected)
# ceil
s = Series(pd.to_datetime(['2012-01-01 13:00:00',
'2012-01-01 12:01:00',
'2012-01-01 08:00:00']), name='xxx')
result = s.dt.ceil('D')
expected = Series(pd.to_datetime(['2012-01-02', '2012-01-02',
'2012-01-02']), name='xxx')
tm.assert_series_equal(result, expected)
# datetimeindex with tz
s = Series(date_range('20130101', periods=5, tz='US/Eastern'),
name='xxx')
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt, prop)
result = s.dt.to_pydatetime()
self.assertIsInstance(result, np.ndarray)
self.assertTrue(result.dtype == object)
result = s.dt.tz_convert('CET')
expected = Series(s._values.tz_convert('CET'),
index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
self.assertEqual(str(tz_result), 'CET')
freq_result = s.dt.freq
self.assertEqual(freq_result, DatetimeIndex(s.values,
freq='infer').freq)
# timedeltaindex
cases = [Series(timedelta_range('1 day', periods=5),
index=list('abcde'), name='xxx'),
Series(timedelta_range('1 day 01:23:45', periods=5,
freq='s'), name='xxx'),
Series(timedelta_range('2 days 01:23:45.012345', periods=5,
freq='ms'), name='xxx')]
for s in cases:
for prop in ok_for_td:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_td_methods:
getattr(s.dt, prop)
result = s.dt.components
self.assertIsInstance(result, DataFrame)
tm.assert_index_equal(result.index, s.index)
result = s.dt.to_pytimedelta()
self.assertIsInstance(result, np.ndarray)
self.assertTrue(result.dtype == object)
result = s.dt.total_seconds()
self.assertIsInstance(result, pd.Series)
self.assertTrue(result.dtype == 'float64')
freq_result = s.dt.freq
self.assertEqual(freq_result, TimedeltaIndex(s.values,
freq='infer').freq)
# both
index = date_range('20130101', periods=3, freq='D')
s = Series(date_range('20140204', periods=3, freq='s'),
index=index, name='xxx')
exp = Series(np.array([2014, 2014, 2014], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.year, exp)
exp = Series(np.array([2, 2, 2], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.month, exp)
exp = Series(np.array([0, 1, 2], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.second, exp)
exp = pd.Series([s[0]] * 3, index=index, name='xxx')
tm.assert_series_equal(s.dt.normalize(), exp)
# periodindex
cases = [Series(period_range('20130101', periods=5, freq='D'),
name='xxx')]
for s in cases:
for prop in ok_for_period:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_period_methods:
getattr(s.dt, prop)
freq_result = s.dt.freq
self.assertEqual(freq_result, PeriodIndex(s.values).freq)
# test limited display api
def get_dir(s):
results = [r for r in s.dt.__dir__() if not r.startswith('_')]
return list(sorted(set(results)))
s = Series(date_range('20130101', periods=5, freq='D'), name='xxx')
results = get_dir(s)
tm.assert_almost_equal(
results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))
s = Series(period_range('20130101', periods=5,
freq='D', name='xxx').asobject)
results = get_dir(s)
tm.assert_almost_equal(
results, list(sorted(set(ok_for_period + ok_for_period_methods))))
# 11295
# ambiguous time error on the conversions
s = Series(pd.date_range('2015-01-01', '2016-01-01',
freq='T'), name='xxx')
s = s.dt.tz_localize('UTC').dt.tz_convert('America/Chicago')
results = get_dir(s)
tm.assert_almost_equal(
results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))
exp_values = pd.date_range('2015-01-01', '2016-01-01', freq='T',
tz='UTC').tz_convert('America/Chicago')
expected = Series(exp_values, name='xxx')
tm.assert_series_equal(s, expected)
# no setting allowed
s = Series(date_range('20130101', periods=5, freq='D'), name='xxx')
with tm.assertRaisesRegexp(ValueError, "modifications"):
s.dt.hour = 5
# trying to set a copy
with pd.option_context('chained_assignment', 'raise'):
def f():
s.dt.hour[0] = 5
self.assertRaises(com.SettingWithCopyError, f)
def test_dt_accessor_no_new_attributes(self):
# https://github.com/pydata/pandas/issues/10673
s = Series(date_range('20130101', periods=5, freq='D'))
with tm.assertRaisesRegexp(AttributeError,
"You cannot add any new attribute"):
s.dt.xlabel = "a"
def test_strftime(self):
# GH 10086
s = Series(date_range('20130101', periods=5))
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['2013/01/01', '2013/01/02', '2013/01/03',
'2013/01/04', '2013/01/05'])
tm.assert_series_equal(result, expected)
s = Series(date_range('2015-02-03 11:22:33.4567', periods=5))
result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
expected = Series(['2015/02/03 11-22-33', '2015/02/04 11-22-33',
'2015/02/05 11-22-33', '2015/02/06 11-22-33',
'2015/02/07 11-22-33'])
tm.assert_series_equal(result, expected)
s = Series(period_range('20130101', periods=5))
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['2013/01/01', '2013/01/02', '2013/01/03',
'2013/01/04', '2013/01/05'])
tm.assert_series_equal(result, expected)
s = Series(period_range(
'2015-02-03 11:22:33.4567', periods=5, freq='s'))
result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
expected = Series(['2015/02/03 11-22-33', '2015/02/03 11-22-34',
'2015/02/03 11-22-35', '2015/02/03 11-22-36',
'2015/02/03 11-22-37'])
tm.assert_series_equal(result, expected)
s = Series(date_range('20130101', periods=5))
s.iloc[0] = pd.NaT
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['NaT', '2013/01/02', '2013/01/03', '2013/01/04',
'2013/01/05'])
tm.assert_series_equal(result, expected)
datetime_index = date_range('20150301', periods=5)
result = datetime_index.strftime("%Y/%m/%d")
expected = np.array(
['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04',
'2015/03/05'], dtype=object)
self.assert_numpy_array_equal(result, expected)
period_index = period_range('20150301', periods=5)
result = period_index.strftime("%Y/%m/%d")
expected = np.array(
['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04',
'2015/03/05'], dtype=object)
self.assert_numpy_array_equal(result, expected)
s = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14,
32, 1)])
result = s.dt.strftime('%Y-%m-%d %H:%M:%S')
expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"])
tm.assert_series_equal(result, expected)
s = Series(period_range('20130101', periods=4, freq='H'))
result = s.dt.strftime('%Y/%m/%d %H:%M:%S')
expected = Series(["2013/01/01 00:00:00", "2013/01/01 01:00:00",
"2013/01/01 02:00:00", "2013/01/01 03:00:00"])
s = Series(period_range('20130101', periods=4, freq='L'))
result = s.dt.strftime('%Y/%m/%d %H:%M:%S.%l')
expected = Series(
["2013/01/01 00:00:00.000", "2013/01/01 00:00:00.001",
"2013/01/01 00:00:00.002", "2013/01/01 00:00:00.003"])
tm.assert_series_equal(result, expected)
def test_valid_dt_with_missing_values(self):
from datetime import date, time
# GH 8689
s = Series(date_range('20130101', periods=5, freq='D'))
s.iloc[2] = pd.NaT
for attr in ['microsecond', 'nanosecond', 'second', 'minute', 'hour',
'day']:
expected = getattr(s.dt, attr).copy()
expected.iloc[2] = np.nan
result = getattr(s.dt, attr)
tm.assert_series_equal(result, expected)
result = s.dt.date
expected = Series(
[date(2013, 1, 1), date(2013, 1, 2), np.nan, date(2013, 1, 4),
date(2013, 1, 5)], dtype='object')
tm.assert_series_equal(result, expected)
result = s.dt.time
expected = Series(
[time(0), time(0), np.nan, time(0), time(0)], dtype='object')
tm.assert_series_equal(result, expected)
def test_dt_accessor_api(self):
# GH 9322
from pandas.tseries.common import (CombinedDatetimelikeProperties,
DatetimeProperties)
self.assertIs(Series.dt, CombinedDatetimelikeProperties)
s = Series(date_range('2000-01-01', periods=3))
self.assertIsInstance(s.dt, DatetimeProperties)
for s in [Series(np.arange(5)), Series(list('abcde')),
Series(np.random.randn(5))]:
with tm.assertRaisesRegexp(AttributeError,
"only use .dt accessor"):
s.dt
self.assertFalse(hasattr(s, 'dt'))
def test_sub_of_datetime_from_TimeSeries(self):
from pandas.tseries.timedeltas import to_timedelta
from datetime import datetime
a = Timestamp(datetime(1993, 0o1, 0o7, 13, 30, 00))
b = datetime(1993, 6, 22, 13, 30)
a = Series([a])
result = to_timedelta(np.abs(a - b))
self.assertEqual(result.dtype, 'timedelta64[ns]')
def test_between(self):
s = Series(bdate_range('1/1/2000', periods=20).asobject)
s[::2] = np.nan
result = s[s.between(s[3], s[17])]
expected = s[3:18].dropna()
assert_series_equal(result, expected)
result = s[s.between(s[3], s[17], inclusive=False)]
expected = s[5:16].dropna()
assert_series_equal(result, expected)
|
gpl-2.0
|
JonnaStalring/AZOrange
|
ConfPred/conformal-master/examples/validate_icpc.py
|
2
|
1381
|
import Orange
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from nonconformist.icp import IcpClassifier
from nonconformist.nc import ProbEstClassifierNc, margin
from myicp import ICP
def split_data(data, n_train, n_test):
n_train = n_train*len(data)//(n_train+n_test)
n_test = len(data)-n_train
ind = np.random.permutation(len(data))
return data[ind[:n_train]], data[ind[n_train:n_train+n_test]]
#data = Orange.data.Table("../data/usps.tab")
data = Orange.data.Table("iris")
for sig in np.linspace(0.0, 0.4, 11):
errs, szs = [], []
for rep in range(10):
#train, test = split_data(data, 7200, 2098)
train, test = split_data(data, 2, 1)
train, calib = split_data(train, 2, 1)
#icp = IcpClassifier(ProbEstClassifierNc(DecisionTreeClassifier(), margin))
icp = IcpClassifier(ProbEstClassifierNc(LogisticRegression(), margin))
#icp = ICP()
icp.fit(train.X, train.Y)
icp.calibrate(calib.X, calib.Y)
pred = icp.predict(test.X, significance=sig)
acc = sum(p[y] for p, y in zip(pred, test.Y))/len(pred)
err = 1-acc
sz = sum(sum(p) for p in pred)/len(pred)
errs.append(err)
szs.append(sz)
print(sig, np.mean(errs), np.mean(szs))
|
lgpl-3.0
|
sstoma/CellProfiler
|
cellprofiler/modules/saveimages.py
|
1
|
60060
|
'''<b>Save Images </b> saves image or movie files.
<hr>
Because CellProfiler usually performs many image analysis steps on many
groups of images, it does <i>not</i> save any of the resulting images to the
hard drive unless you specifically choose to do so with the <b>SaveImages</b>
module. You can save any of the
processed images created by CellProfiler during the analysis using this module.
<p>You can choose from many different image formats for saving your files. This
allows you to use the module as a file format converter, by loading files
in their original format and then saving them in an alternate format.</p>
<p>Note that saving images in 12-bit format is not supported, and 16-bit format
is supported for TIFF only.</p>
See also <b>NamesAndTypes</b>, <b>ConserveMemory</b>.
'''
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import logging
import matplotlib
import numpy as np
import re
import os
import sys
import scipy.io.matlab.mio
import traceback
logger = logging.getLogger(__name__)
import cellprofiler.cpmodule as cpm
import cellprofiler.measurements as cpmeas
import cellprofiler.settings as cps
from cellprofiler.settings import YES, NO
import cellprofiler.preferences as cpp
from cellprofiler.gui.help import USING_METADATA_TAGS_REF, USING_METADATA_HELP_REF
from cellprofiler.preferences import \
standardize_default_folder_names, DEFAULT_INPUT_FOLDER_NAME, \
DEFAULT_OUTPUT_FOLDER_NAME, ABSOLUTE_FOLDER_NAME, \
DEFAULT_INPUT_SUBFOLDER_NAME, DEFAULT_OUTPUT_SUBFOLDER_NAME, \
IO_FOLDER_CHOICE_HELP_TEXT, IO_WITH_METADATA_HELP_TEXT, \
get_default_image_directory
from cellprofiler.utilities.relpath import relpath
from cellprofiler.modules.loadimages import C_FILE_NAME, C_PATH_NAME, C_URL
from cellprofiler.modules.loadimages import \
C_OBJECTS_FILE_NAME, C_OBJECTS_PATH_NAME, C_OBJECTS_URL
from cellprofiler.modules.loadimages import pathname2url
from cellprofiler.cpmath.cpmorphology import distance_color_labels
from cellprofiler.utilities.version import get_version
from bioformats.formatwriter import write_image
import bioformats.omexml as ome
IF_IMAGE = "Image"
IF_MASK = "Mask"
IF_CROPPING = "Cropping"
IF_FIGURE = "Module window"
IF_MOVIE = "Movie"
IF_OBJECTS = "Objects"
IF_ALL = [IF_IMAGE, IF_MASK, IF_CROPPING, IF_MOVIE, IF_OBJECTS]
OLD_BIT_DEPTH_8 = "8"
OLD_BIT_DEPTH_16 = "16"
BIT_DEPTH_8 = "8-bit integer"
BIT_DEPTH_16 = "16-bit integer"
BIT_DEPTH_FLOAT = "32-bit floating point"
FN_FROM_IMAGE = "From image filename"
FN_SEQUENTIAL = "Sequential numbers"
FN_SINGLE_NAME = "Single name"
SINGLE_NAME_TEXT = "Enter single file name"
FN_WITH_METADATA = "Name with metadata"
FN_IMAGE_FILENAME_WITH_METADATA = "Image filename with metadata"
METADATA_NAME_TEXT = ("""Enter file name with metadata""")
SEQUENTIAL_NUMBER_TEXT = "Enter file prefix"
FF_BMP = "bmp"
FF_JPG = "jpg"
FF_JPEG = "jpeg"
FF_PBM = "pbm"
FF_PCX = "pcx"
FF_PGM = "pgm"
FF_PNG = "png"
FF_PNM = "pnm"
FF_PPM = "ppm"
FF_RAS = "ras"
FF_TIF = "tif"
FF_TIFF = "tiff"
FF_XWD = "xwd"
FF_AVI = "avi"
FF_MAT = "mat"
FF_MOV = "mov"
FF_SUPPORTING_16_BIT = [FF_TIF, FF_TIFF]
PC_WITH_IMAGE = "Same folder as image"
OLD_PC_WITH_IMAGE_VALUES = ["Same folder as image"]
PC_CUSTOM = "Custom"
PC_WITH_METADATA = "Custom with metadata"
WS_EVERY_CYCLE = "Every cycle"
WS_FIRST_CYCLE = "First cycle"
WS_LAST_CYCLE = "Last cycle"
CM_GRAY = "gray"
GC_GRAYSCALE = "Grayscale"
GC_COLOR = "Color"
'''Offset to the directory path setting'''
OFFSET_DIRECTORY_PATH = 11
'''Offset to the bit depth setting in version 11'''
OFFSET_BIT_DEPTH_V11 = 12
class SaveImages(cpm.CPModule):
module_name = "SaveImages"
variable_revision_number = 11
category = "File Processing"
def create_settings(self):
self.save_image_or_figure = cps.Choice(
"Select the type of image to save",
IF_ALL,
IF_IMAGE,doc="""
The following types of images can be saved as a file on the hard drive:
<ul>
<li><i>%(IF_IMAGE)s:</i> Any of the images produced upstream of <b>SaveImages</b> can be selected for saving.
Outlines created by <b>Identify</b> modules can also be saved with this option, but you must
select "Retain outlines..." of identified objects within the <b>Identify</b> module. You might
also want to use the <b>OverlayOutlines</b> module prior to saving images.</li>
<li><i>%(IF_MASK)s:</i> Relevant only if the <b>Crop</b> module is used. The <b>Crop</b> module
creates a mask of the pixels of interest in the image. Saving the mask will produce a
binary image in which the pixels of interest are set to 1; all other pixels are
set to 0.</li>
<li><i>%(IF_CROPPING)s:</i> Relevant only if the <b>Crop</b> module is used. The <b>Crop</b>
module also creates a cropping image which is typically the same size as the original
image. However, since the <b>Crop</b> permits removal of the rows and columns that are left
blank, the cropping can be of a different size than the mask.</li>
<li><i>%(IF_MOVIE)s:</i> A sequence of images can be saved as a movie file. Currently only AVIs can be written.
Each image becomes a frame of the movie.</li>
<li><i>%(IF_OBJECTS)s:</i> Objects can be saved as an image. The image
is saved as grayscale unless you select a color map other than
gray. Background pixels appear as black and
each object is assigned an intensity level corresponding to
its object number. The resulting image can be loaded as objects
by the <b>NamesAndTypes</b> module. Objects are best saved as TIF
files. <b>SaveImages</b> will use an 8-bit TIF file if there
are fewer than 256 objects and will use a 16-bit TIF otherwise.
Results may be unpredictable if you save using PNG and there
are more than 255 objects or if you save using one of the other
file formats.</li>
</ul>"""%globals())
self.image_name = cps.ImageNameSubscriber(
"Select the image to save",cps.NONE, doc = """
<i>(Used only if "%(IF_IMAGE)s", "%(IF_MASK)s" or "%(IF_CROPPING)s" are selected to save)</i><br>
Select the image you want to save."""%globals())
self.objects_name = cps.ObjectNameSubscriber(
"Select the objects to save", cps.NONE,doc = """
<i>(Used only if saving "%(IF_OBJECTS)s")</i><br>
Select the objects that you want to save."""%globals())
self.figure_name = cps.FigureSubscriber(
"Select the module display window to save",cps.NONE,doc="""
<i>(Used only if saving "%(IF_FIGURE)s")</i><br>
Enter the module number/name for which you want to
save the module display window."""%globals())
self.file_name_method = cps.Choice(
"Select method for constructing file names",
[FN_FROM_IMAGE, FN_SEQUENTIAL,
FN_SINGLE_NAME],
FN_FROM_IMAGE,doc="""
<i>(Used only if saving non-movie files)</i><br>
Several choices are available for constructing the image file name:
<ul>
<li><i>%(FN_FROM_IMAGE)s:</i> The filename will be constructed based
on the original filename of an input image specified in <b>NamesAndTypes</b>.
You will have the opportunity to prefix or append
additional text.
<p>If you have metadata associated with your images, you can append text
to the image filename using a metadata tag. This is especially useful if you
want your output given a unique label according to the metadata corresponding
to an image group. The name of the metadata to substitute can be provided for
each image for each cycle using the <b>Metadata</b> module.
%(USING_METADATA_TAGS_REF)s%(USING_METADATA_HELP_REF)s.</p></li>
<li><i>%(FN_SEQUENTIAL)s:</i> Same as above, but in addition, each filename
will have a number appended to the end that corresponds to
the image cycle number (starting at 1).</li>
<li><i>%(FN_SINGLE_NAME)s:</i> A single name will be given to the
file. Since the filename is fixed, this file will be overwritten with each cycle.
In this case, you would probably want to save the image on the last cycle
(see the <i>Select how often to save</i> setting). The exception to this is to
use a metadata tag to provide a unique label, as mentioned
in the <i>%(FN_FROM_IMAGE)s</i> option.</li>
</ul>"""%globals())
self.file_image_name = cps.FileImageNameSubscriber(
"Select image name for file prefix",
cps.NONE,doc="""
<i>(Used only when "%(FN_FROM_IMAGE)s" is selected for contructing the filename)</i><br>
Select an image loaded using <b>NamesAndTypes</b>. The original filename will be
used as the prefix for the output filename."""%globals())
self.single_file_name = cps.Text(
SINGLE_NAME_TEXT, "OrigBlue",
metadata = True, doc="""
<i>(Used only when "%(FN_SEQUENTIAL)s" or "%(FN_SINGLE_NAME)s" are selected for contructing the filename)</i><br>
Specify the filename text here. If you have metadata
associated with your images, enter the filename text with the metadata tags. %(USING_METADATA_TAGS_REF)s<br>
Do not enter the file extension in this setting; it will be appended automatically."""%globals())
self.number_of_digits = cps.Integer(
"Number of digits", 4, doc="""
<i>(Used only when "%(FN_SEQUENTIAL)s" is selected for contructing the filename)</i><br>
Specify the number of digits to be used for the sequential numbering. Zeros will be
used to left-pad the digits. If the number specified here is less than that needed to
contain the number of image sets, the latter will override the value entered."""%globals())
self.wants_file_name_suffix = cps.Binary(
"Append a suffix to the image file name?", False, doc = """
Select <i>%(YES)s</i> to add a suffix to the image's file name.
Select <i>%(NO)s</i> to use the image name as-is."""%globals())
self.file_name_suffix = cps.Text(
"Text to append to the image name",
"", metadata = True, doc="""
<i>(Used only when constructing the filename from the image filename)</i><br>
Enter the text that should be appended to the filename specified above.""")
self.file_format = cps.Choice(
"Saved file format",
[FF_BMP, FF_JPG, FF_JPEG, FF_PNG, FF_TIF, FF_TIFF, FF_MAT],
value = FF_TIF, doc="""
<i>(Used only when saving non-movie files)</i><br>
Select the image or movie format to save the image(s). Most common
image formats are available; MAT-files are readable by MATLAB.""")
self.movie_format = cps.Choice(
"Saved movie format",
[FF_AVI, FF_TIF, FF_MOV],
value = FF_AVI, doc="""
<i>(Used only when saving movie files)</i><br>
Select the movie format to use when saving movies. AVI and MOV
store images from successive image sets as movie frames. TIF
stores each image as an image plane in a TIF stack.
""")
self.pathname = SaveImagesDirectoryPath(
"Output file location", self.file_image_name,doc = """
<i>(Used only when saving non-movie files)</i><br>
This setting lets you choose the folder for the output
files. %(IO_FOLDER_CHOICE_HELP_TEXT)s
<p>An additional option is the following:
<ul>
<li><i>Same folder as image</i>: Place the output file in the same folder
that the source image is located.</li>
</ul></p>
<p>%(IO_WITH_METADATA_HELP_TEXT)s %(USING_METADATA_TAGS_REF)s.
For instance, if you have a metadata tag named
"Plate", you can create a per-plate folder by selecting one the subfolder options
and then specifying the subfolder name as "\g<Plate>". The module will
substitute the metadata values for the current image set for any metadata tags in the
folder name.%(USING_METADATA_HELP_REF)s.</p>
<p>If the subfolder does not exist when the pipeline is run, CellProfiler will
create it.</p>
<p>If you are creating nested subfolders using the sub-folder options, you can
specify the additional folders separated with slashes. For example, "Outlines/Plate1" will create
a "Plate1" folder in the "Outlines" folder, which in turn is under the Default
Input/Output Folder. The use of a forward slash ("/") as a folder separator will
avoid ambiguity between the various operating systems.</p>"""%globals())
# TODO:
self.bit_depth = cps.Choice(
"Image bit depth",
[BIT_DEPTH_8, BIT_DEPTH_16, BIT_DEPTH_FLOAT],doc="""
<i>(Used only when saving files in a non-MAT format)</i><br>
Select the bit-depth at which you want to save the images.
<i>%(BIT_DEPTH_FLOAT)s</i> saves the image as floating-point decimals
with 32-bit precision in its raw form, typically scaled between
0 and 1.
<b>%(BIT_DEPTH_16)s and %(BIT_DEPTH_FLOAT)s images are supported only
for TIF formats. Currently, saving images in 12-bit is not supported.</b>""" %
globals())
self.overwrite = cps.Binary(
"Overwrite existing files without warning?",False,doc="""
Select <i>%(YES)s</i> to automatically overwrite a file if it already exists.
Select <i>%(NO)s</i> to be prompted for confirmation first.
<p>If you are running the pipeline on a computing cluster,
select <i>%(YES)s</i> since you will not be able to intervene and answer the confirmation prompt.</p>"""%globals())
self.when_to_save = cps.Choice(
"When to save",
[WS_EVERY_CYCLE,WS_FIRST_CYCLE,WS_LAST_CYCLE],
WS_EVERY_CYCLE, doc="""<a name='when_to_save'>
<i>(Used only when saving non-movie files)</i><br>
Specify at what point during pipeline execution to save file(s). </a>
<ul>
<li><i>%(WS_EVERY_CYCLE)s:</i> Useful for when the image of interest is created every cycle and is
not dependent on results from a prior cycle.</li>
<li><i>%(WS_FIRST_CYCLE)s:</i> Useful for when you are saving an aggregate image created
on the first cycle, e.g., <b>CorrectIlluminationCalculate</b> with the <i>All</i>
setting used on images obtained directly from <b>NamesAndTypes</b>.</li>
<li><i>%(WS_LAST_CYCLE)s:</i> Useful for when you are saving an aggregate image completed
on the last cycle, e.g., <b>CorrectIlluminationCalculate</b> with the <i>All</i>
setting used on intermediate images generated during each cycle.</li>
</ul> """%globals())
self.rescale = cps.Binary(
"Rescale the images? ",False,doc="""
<i>(Used only when saving non-MAT file images)</i><br>
Select <i>%(YES)s</i> if you want the image to occupy the full dynamic range of the bit
depth you have chosen. For example, if you save an image to an 8-bit file, the
smallest grayscale value will be mapped to 0 and the largest value will be mapped
to 2<sup>8</sup>-1 = 255.
<p>This will increase the contrast of the output image but will also effectively
stretch the image data, which may not be desirable in some
circumstances. See <b>RescaleIntensity</b> for other rescaling options.</p>"""%globals())
self.gray_or_color = cps.Choice(
"Save as grayscale or color image?",
[GC_GRAYSCALE, GC_COLOR],doc = """
<i>(Used only when saving "%(IF_OBJECTS)s")</i><br>
You can save objects as a grayscale image or as a color image.
<ul>
<li><i>%(GC_GRAYSCALE)s: </i> Use the pixel's object number
(label) for the grayscale intensity. Background pixels are
colored black. Grayscale images are more
suitable if you are going to load the image as objects using
<b>NamesAndTypes</b> or some other program that will be used to
relate object measurements to the pixels in the image.
You should save grayscale images using the .TIF or .MAT formats
if possible; otherwise you may have problems saving files
with more than 255 objects.</li>
<li><i>%(GC_COLOR)s:</i> Assigns different colors to different
objects.</li>
</ul>"""%globals())
self.colormap = cps.Colormap(
'Select colormap',
value = CM_GRAY,doc= """
<i>(Used only when saving non-MAT file images)</i><br>
This affects how images color intensities are displayed. All available colormaps can be seen
<a href="http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps">here</a>.""")
self.update_file_names = cps.Binary(
"Record the file and path information to the saved image?",False,doc="""
Select <i>%(YES)s</i> to store filename and pathname data for each of the new files created
via this module as a per-image measurement.
<p>Instances in which this information may be useful include:
<ul>
<li>Exporting measurements to a database, allowing
access to the saved image. If you are using the machine-learning tools or image
viewer in CellProfiler Analyst, for example, you will want to enable this setting if you want
the saved images to be displayed along with the original images.</li>
<li>Allowing downstream modules (e.g., <b>CreateWebPage</b>) to access
the newly saved files.</li>
</ul></p>"""%globals())
self.create_subdirectories = cps.Binary(
"Create subfolders in the output folder?",False,doc = """
Select <i>%(YES)s</i> to create subfolders to match the input image folder structure."""%globals())
self.root_dir = cps.DirectoryPath(
"Base image folder", doc = """
            <i>(Used only if creating subfolders in the output folder)</i><br>
In subfolder mode, <b>SaveImages</b> determines the folder for
an image file by examining the path of the matching input file.
The path that SaveImages uses is relative to the image folder
chosen using this setting. As an example, input images might be stored
in a folder structure of "images%(sep)s<i>experiment-name</i>%(sep)s
<i>date</i>%(sep)s<i>plate-name</i>". If the image folder is
"images", <b>SaveImages</b> will store images in the subfolder,
"<i>experiment-name</i>%(sep)s<i>date</i>%(sep)s<i>plate-name</i>".
If the image folder is "images%(sep)s<i>experiment-name</i>",
<b>SaveImages</b> will store images in the subfolder,
            "<i>date</i>%(sep)s<i>plate-name</i>".
""" % dict(sep=os.path.sep))
def settings(self):
"""Return the settings in the order to use when saving"""
return [self.save_image_or_figure, self.image_name,
self.objects_name, self.figure_name,
self.file_name_method, self.file_image_name,
self.single_file_name, self.number_of_digits,
self.wants_file_name_suffix,
self.file_name_suffix, self.file_format,
self.pathname, self.bit_depth,
self.overwrite, self.when_to_save,
self.rescale, self.gray_or_color, self.colormap,
self.update_file_names, self.create_subdirectories,
self.root_dir, self.movie_format]
def visible_settings(self):
"""Return only the settings that should be shown"""
result = [self.save_image_or_figure]
if self.save_image_or_figure == IF_FIGURE:
result.append(self.figure_name)
elif self.save_image_or_figure == IF_OBJECTS:
result.append(self.objects_name)
else:
result.append(self.image_name)
result.append(self.file_name_method)
if self.file_name_method == FN_FROM_IMAGE:
result += [self.file_image_name, self.wants_file_name_suffix]
if self.wants_file_name_suffix:
result.append(self.file_name_suffix)
elif self.file_name_method == FN_SEQUENTIAL:
self.single_file_name.text = SEQUENTIAL_NUMBER_TEXT
# XXX - Change doc, as well!
result.append(self.single_file_name)
result.append(self.number_of_digits)
elif self.file_name_method == FN_SINGLE_NAME:
self.single_file_name.text = SINGLE_NAME_TEXT
result.append(self.single_file_name)
else:
raise NotImplementedError("Unhandled file name method: %s"%(self.file_name_method))
if self.save_image_or_figure == IF_MOVIE:
result.append(self.movie_format)
else:
result.append(self.file_format)
supports_16_bit = (self.file_format in FF_SUPPORTING_16_BIT and
self.save_image_or_figure == IF_IMAGE)
if supports_16_bit:
# TIFF supports 8 & 16-bit, all others are written 8-bit
result.append(self.bit_depth)
result.append(self.pathname)
result.append(self.overwrite)
if self.save_image_or_figure != IF_MOVIE:
result.append(self.when_to_save)
if (self.save_image_or_figure == IF_IMAGE and
self.file_format != FF_MAT):
result.append(self.rescale)
if self.get_bit_depth() == "8":
result.append(self.colormap)
elif self.save_image_or_figure == IF_OBJECTS:
result.append(self.gray_or_color)
if self.gray_or_color == GC_COLOR:
result.append(self.colormap)
result.append(self.update_file_names)
if self.file_name_method == FN_FROM_IMAGE:
result.append(self.create_subdirectories)
if self.create_subdirectories:
result.append(self.root_dir)
return result
@property
def module_key(self):
return "%s_%d"%(self.module_name, self.module_num)
def prepare_group(self, workspace, grouping, image_numbers):
d = self.get_dictionary(workspace.image_set_list)
if self.save_image_or_figure == IF_MOVIE:
d['N_FRAMES'] = len(image_numbers)
d['CURRENT_FRAME'] = 0
return True
def prepare_to_create_batch(self, workspace, fn_alter_path):
self.pathname.alter_for_create_batch_files(fn_alter_path)
if self.create_subdirectories:
self.root_dir.alter_for_create_batch_files(fn_alter_path)
def run(self,workspace):
"""Run the module
pipeline - instance of CellProfiler.Pipeline for this run
workspace - the workspace contains:
image_set - the images in the image set being processed
object_set - the objects (labeled masks) in this image set
measurements - the measurements for this run
frame - display within this frame (or None to not display)
"""
if self.save_image_or_figure.value in (IF_IMAGE, IF_MASK, IF_CROPPING):
should_save = self.run_image(workspace)
elif self.save_image_or_figure == IF_MOVIE:
should_save = self.run_movie(workspace)
elif self.save_image_or_figure == IF_OBJECTS:
should_save = self.run_objects(workspace)
else:
raise NotImplementedError(("Saving a %s is not yet supported"%
(self.save_image_or_figure)))
workspace.display_data.filename = self.get_filename(
workspace, make_dirs = False, check_overwrite = False)
def is_aggregation_module(self):
'''SaveImages is an aggregation module when it writes movies'''
return self.save_image_or_figure == IF_MOVIE or \
self.when_to_save == WS_LAST_CYCLE
def display(self, workspace, figure):
if self.show_window:
if self.save_image_or_figure == IF_MOVIE:
return
figure.set_subplots((1, 1))
outcome = ("Wrote %s" if workspace.display_data.wrote_image
else "Did not write %s")
figure.subplot_table(0, 0, [[outcome %
(workspace.display_data.filename)]])
def run_image(self,workspace):
"""Handle saving an image"""
#
# First, check to see if we should save this image
#
if self.when_to_save == WS_FIRST_CYCLE:
d = self.get_dictionary(workspace.image_set_list)
if workspace.measurements[cpmeas.IMAGE, cpmeas.GROUP_INDEX] > 1:
workspace.display_data.wrote_image = False
self.save_filename_measurements(workspace)
return
d["FIRST_IMAGE"] = False
elif self.when_to_save == WS_LAST_CYCLE:
workspace.display_data.wrote_image = False
self.save_filename_measurements( workspace)
return
self.save_image(workspace)
return True
def run_movie(self, workspace):
out_file = self.get_filename(workspace, check_overwrite=False)
# overwrite checks are made only for first frame.
d = self.get_dictionary(workspace.image_set_list)
if d["CURRENT_FRAME"] == 0 and os.path.exists(out_file):
if not self.check_overwrite(out_file, workspace):
d["CURRENT_FRAME"] = "Ignore"
return
else:
# Have to delete the old movie before making the new one
os.remove(out_file)
elif d["CURRENT_FRAME"] == "Ignore":
return
image = workspace.image_set.get_image(self.image_name.value)
pixels = image.pixel_data
pixels = pixels * 255
frames = d['N_FRAMES']
current_frame = d["CURRENT_FRAME"]
d["CURRENT_FRAME"] += 1
self.do_save_image(workspace, out_file, pixels, ome.PT_UINT8,
t = current_frame, size_t = frames)
def run_objects(self, workspace):
#
# First, check to see if we should save this image
#
if self.when_to_save == WS_FIRST_CYCLE:
if workspace.measurements[cpmeas.IMAGE, cpmeas.GROUP_INDEX] > 1:
workspace.display_data.wrote_image = False
self.save_filename_measurements(workspace)
return
elif self.when_to_save == WS_LAST_CYCLE:
workspace.display_data.wrote_image = False
self.save_filename_measurements( workspace)
return
self.save_objects(workspace)
def save_objects(self, workspace):
objects_name = self.objects_name.value
objects = workspace.object_set.get_objects(objects_name)
filename = self.get_filename(workspace)
if filename is None: # failed overwrite check
return
labels = [l for l, c in objects.get_labels()]
if self.get_file_format() == FF_MAT:
pixels = objects.segmented
scipy.io.matlab.mio.savemat(filename,{"Image":pixels},format='5')
elif self.gray_or_color == GC_GRAYSCALE:
if objects.count > 255:
pixel_type = ome.PT_UINT16
else:
pixel_type = ome.PT_UINT8
for i, l in enumerate(labels):
self.do_save_image(
workspace, filename, l, pixel_type, t=i, size_t=len(labels))
else:
if self.colormap == cps.DEFAULT:
colormap = cpp.get_default_colormap()
else:
colormap = self.colormap.value
cm = matplotlib.cm.get_cmap(colormap)
cpixels = np.zeros((labels[0].shape[0], labels[0].shape[1], 3))
counts = np.zeros(labels[0].shape, int)
mapper = matplotlib.cm.ScalarMappable(cmap=cm)
for pixels in labels:
cpixels[pixels != 0, :] += \
mapper.to_rgba(distance_color_labels(pixels),
bytes=True)[pixels != 0, :3]
counts[pixels != 0] += 1
counts[counts == 0] = 1
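            # Note (added comment): where colormapped objects overlap, the summed RGB
            # contributions are averaged by the per-pixel counts in the division below.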
cpixels = cpixels / counts[:, :, np.newaxis]
self.do_save_image(workspace, filename, cpixels, ome.PT_UINT8)
self.save_filename_measurements(workspace)
if self.show_window:
workspace.display_data.wrote_image = True
def post_group(self, workspace, *args):
if (self.when_to_save == WS_LAST_CYCLE and
self.save_image_or_figure != IF_MOVIE):
if self.save_image_or_figure == IF_OBJECTS:
self.save_objects(workspace)
else:
self.save_image(workspace)
def do_save_image(self, workspace, filename, pixels, pixel_type,
c = 0, z = 0, t = 0,
size_c = 1, size_z = 1, size_t = 1,
channel_names = None):
'''Save image using bioformats
workspace - the current workspace
filename - save to this filename
pixels - the image to save
pixel_type - save using this pixel type
c - the image's channel index
z - the image's z index
t - the image's t index
        size_c - # of channels in the stack
        size_z - # of z-planes in the stack
        size_t - # of timepoints in the stack
        channel_names - names of the channels (names are made up if not present)
'''
write_image(filename, pixels, pixel_type,
c = c, z = z, t = t,
size_c = size_c, size_z = size_z, size_t = size_t,
channel_names = channel_names)
def save_image(self, workspace):
if self.show_window:
workspace.display_data.wrote_image = False
image = workspace.image_set.get_image(self.image_name.value)
if self.save_image_or_figure == IF_IMAGE:
pixels = image.pixel_data
u16hack = (self.get_bit_depth() == BIT_DEPTH_16 and
pixels.dtype.kind in ('u', 'i'))
if self.file_format != FF_MAT:
if self.rescale.value:
pixels = pixels.copy()
# Normalize intensities for each channel
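                    # Illustrative note (added): e.g. a grayscale image spanning
                    # 0.2..0.7 is mapped linearly onto 0..1 here, before the
                    # bit-depth scaling to 0..255 or 0..65535 below.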
if pixels.ndim == 3:
# RGB
for i in range(3):
img_min = np.min(pixels[:,:,i])
img_max = np.max(pixels[:,:,i])
if img_max > img_min:
pixels[:,:,i] = (pixels[:,:,i] - img_min) / (img_max - img_min)
else:
# Grayscale
img_min = np.min(pixels)
img_max = np.max(pixels)
if img_max > img_min:
pixels = (pixels - img_min) / (img_max - img_min)
elif not (u16hack or self.get_bit_depth() == BIT_DEPTH_FLOAT):
# Clip at 0 and 1
if np.max(pixels) > 1 or np.min(pixels) < 0:
sys.stderr.write(
"Warning, clipping image %s before output. Some intensities are outside of range 0-1" %
self.image_name.value)
pixels = pixels.copy()
pixels[pixels < 0] = 0
pixels[pixels > 1] = 1
if pixels.ndim == 2 and self.colormap != CM_GRAY and\
self.get_bit_depth() == BIT_DEPTH_8:
# Convert grayscale image to rgb for writing
if self.colormap == cps.DEFAULT:
colormap = cpp.get_default_colormap()
else:
colormap = self.colormap.value
cm = matplotlib.cm.get_cmap(colormap)
mapper = matplotlib.cm.ScalarMappable(cmap=cm)
pixels = mapper.to_rgba(pixels, bytes=True)
pixel_type = ome.PT_UINT8
elif self.get_bit_depth() == BIT_DEPTH_8:
pixels = (pixels*255).astype(np.uint8)
pixel_type = ome.PT_UINT8
elif self.get_bit_depth() == BIT_DEPTH_FLOAT:
pixel_type = ome.PT_FLOAT
else:
if not u16hack:
pixels = (pixels*65535)
pixel_type = ome.PT_UINT16
elif self.save_image_or_figure == IF_MASK:
pixels = image.mask.astype(np.uint8) * 255
pixel_type = ome.PT_UINT8
elif self.save_image_or_figure == IF_CROPPING:
pixels = image.crop_mask.astype(np.uint8) * 255
pixel_type = ome.PT_UINT8
filename = self.get_filename(workspace)
if filename is None: # failed overwrite check
return
if self.get_file_format() == FF_MAT:
scipy.io.matlab.mio.savemat(filename,{"Image":pixels},format='5')
elif self.get_file_format() == FF_BMP:
save_bmp(filename, pixels)
else:
self.do_save_image(workspace, filename, pixels, pixel_type)
if self.show_window:
workspace.display_data.wrote_image = True
if self.when_to_save != WS_LAST_CYCLE:
self.save_filename_measurements(workspace)
def check_overwrite(self, filename, workspace):
'''Check to see if it's legal to overwrite a file
Throws an exception if can't overwrite and no interaction available.
Returns False if can't overwrite, otherwise True.
'''
if not self.overwrite.value and os.path.isfile(filename):
try:
return (workspace.interaction_request(self, workspace.measurements.image_set_number, filename) == "Yes")
except workspace.NoInteractionException:
raise ValueError('SaveImages: trying to overwrite %s in headless mode, but Overwrite files is set to "No"' % (filename))
return True
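    # Note (added comment): in the GUI, workspace.interaction_request() routes to
    # handle_interaction() below, which shows a wx confirmation dialog; in headless
    # mode the NoInteractionException branch above raises a ValueError instead.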
def handle_interaction(self, image_set_number, filename):
'''handle an interaction request from check_overwrite()'''
import wx
dlg = wx.MessageDialog(wx.GetApp().TopWindow,
"%s #%d, set #%d - Do you want to overwrite %s?" % \
(self.module_name, self.module_num, image_set_number, filename),
"Warning: overwriting file", wx.YES_NO | wx.ICON_QUESTION)
result = dlg.ShowModal() == wx.ID_YES
return "Yes" if result else "No"
def save_filename_measurements(self, workspace):
if self.update_file_names.value:
filename = self.get_filename(workspace, make_dirs = False,
check_overwrite = False)
pn, fn = os.path.split(filename)
url = pathname2url(filename)
workspace.measurements.add_measurement(cpmeas.IMAGE,
self.file_name_feature,
fn,
can_overwrite=True)
workspace.measurements.add_measurement(cpmeas.IMAGE,
self.path_name_feature,
pn,
can_overwrite=True)
workspace.measurements.add_measurement(cpmeas.IMAGE,
self.url_feature,
url,
can_overwrite=True)
@property
def file_name_feature(self):
'''The file name measurement for the output file'''
if self.save_image_or_figure == IF_OBJECTS:
return '_'.join((C_OBJECTS_FILE_NAME, self.objects_name.value))
return '_'.join((C_FILE_NAME, self.image_name.value))
@property
def path_name_feature(self):
'''The path name measurement for the output file'''
if self.save_image_or_figure == IF_OBJECTS:
return '_'.join((C_OBJECTS_PATH_NAME, self.objects_name.value))
return '_'.join((C_PATH_NAME, self.image_name.value))
@property
def url_feature(self):
'''The URL measurement for the output file'''
if self.save_image_or_figure == IF_OBJECTS:
return '_'.join((C_OBJECTS_URL, self.objects_name.value))
return '_'.join((C_URL, self.image_name.value))
@property
def source_file_name_feature(self):
'''The file name measurement for the exemplar disk image'''
return '_'.join((C_FILE_NAME, self.file_image_name.value))
def source_path(self, workspace):
'''The path for the image data, or its first parent with a path'''
if self.file_name_method.value == FN_FROM_IMAGE:
path_feature = '%s_%s' % (C_PATH_NAME, self.file_image_name.value)
assert workspace.measurements.has_feature(cpmeas.IMAGE, path_feature),\
"Image %s does not have a path!" % (self.file_image_name.value)
return workspace.measurements.get_current_image_measurement(path_feature)
# ... otherwise, chase the cpimage hierarchy looking for an image with a path
cur_image = workspace.image_set.get_image(self.image_name.value)
while cur_image.path_name is None:
cur_image = cur_image.parent_image
        assert cur_image is not None, \
            "Could not determine source path for image %s" % self.image_name.value
return cur_image.path_name
def get_measurement_columns(self, pipeline):
if self.update_file_names.value:
return [(cpmeas.IMAGE,
self.file_name_feature,
cpmeas.COLTYPE_VARCHAR_FILE_NAME),
(cpmeas.IMAGE,
self.path_name_feature,
cpmeas.COLTYPE_VARCHAR_PATH_NAME)]
else:
return []
def get_filename(self, workspace, make_dirs=True, check_overwrite=True):
"Concoct a filename for the current image based on the user settings"
measurements=workspace.measurements
if self.file_name_method == FN_SINGLE_NAME:
filename = self.single_file_name.value
filename = workspace.measurements.apply_metadata(filename)
elif self.file_name_method == FN_SEQUENTIAL:
filename = self.single_file_name.value
filename = workspace.measurements.apply_metadata(filename)
n_image_sets = workspace.measurements.image_set_count
ndigits = int(np.ceil(np.log10(n_image_sets+1)))
ndigits = max((ndigits,self.number_of_digits.value))
padded_num_string = str(measurements.image_set_number).zfill(ndigits)
filename = '%s%s'%(filename, padded_num_string)
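            # Example (added comment): with 250 image sets and number_of_digits=4,
            # image set 7 gets the suffix "0007" -- ndigits = max(ceil(log10(251)), 4) = 4.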
else:
file_name_feature = self.source_file_name_feature
filename = measurements.get_current_measurement('Image',
file_name_feature)
filename = os.path.splitext(filename)[0]
if self.wants_file_name_suffix:
suffix = self.file_name_suffix.value
suffix = workspace.measurements.apply_metadata(suffix)
filename += suffix
filename = "%s.%s"%(filename,self.get_file_format())
pathname = self.pathname.get_absolute_path(measurements)
if self.create_subdirectories:
image_path = self.source_path(workspace)
subdir = relpath(image_path, self.root_dir.get_absolute_path())
pathname = os.path.join(pathname, subdir)
if len(pathname) and not os.path.isdir(pathname) and make_dirs:
try:
os.makedirs(pathname)
except:
#
# On cluster, this can fail if the path was created by
# another process after this process found it did not exist.
#
if not os.path.isdir(pathname):
raise
result = os.path.join(pathname, filename)
if check_overwrite and not self.check_overwrite(result, workspace):
return
if check_overwrite and os.path.isfile(result):
os.remove(result)
return result
def get_file_format(self):
"""Return the file format associated with the extension in self.file_format
"""
if self.save_image_or_figure == IF_MOVIE:
return self.movie_format.value
return self.file_format.value
def get_bit_depth(self):
if (self.save_image_or_figure == IF_IMAGE and
self.get_file_format() in FF_SUPPORTING_16_BIT):
return self.bit_depth.value
else:
return BIT_DEPTH_8
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
"""Adjust the setting values to be backwards-compatible with old versions
"""
PC_DEFAULT = "Default output folder"
#################################
#
# Matlab legacy
#
#################################
if from_matlab and variable_revision_number == 12:
# self.create_subdirectories.value is already False by default.
variable_revision_number = 13
if from_matlab and variable_revision_number == 13:
new_setting_values = list(setting_values)
for i in [3, 12]:
if setting_values[i] == '\\':
                    new_setting_values[i] = cps.DO_NOT_USE
            setting_values = new_setting_values
            variable_revision_number = 14
if from_matlab and variable_revision_number == 14:
new_setting_values = []
if setting_values[0].isdigit():
new_setting_values.extend([IF_FIGURE,setting_values[1]])
elif setting_values[3] == 'avi':
new_setting_values.extend([IF_MOVIE, setting_values[0]])
elif setting_values[0].startswith("Cropping"):
new_setting_values.extend([IF_CROPPING,
setting_values[0][len("Cropping"):]])
elif setting_values[0].startswith("CropMask"):
new_setting_values.extend([IF_MASK,
setting_values[0][len("CropMask"):]])
else:
new_setting_values.extend([IF_IMAGE, setting_values[0]])
new_setting_values.append(new_setting_values[1])
if setting_values[1] == 'N':
new_setting_values.extend([FN_SEQUENTIAL,"None","None"])
elif setting_values[1][0] == '=':
new_setting_values.extend([FN_SINGLE_NAME,setting_values[1][1:],
setting_values[1][1:]])
else:
if len(cpmeas.find_metadata_tokens(setting_values[1])):
new_setting_values.extend([FN_WITH_METADATA, setting_values[1],
setting_values[1]])
else:
new_setting_values.extend([FN_FROM_IMAGE, setting_values[1],
setting_values[1]])
new_setting_values.extend(setting_values[2:4])
if setting_values[4] == '.':
new_setting_values.extend([PC_DEFAULT, "None"])
elif setting_values[4] == '&':
new_setting_values.extend([PC_WITH_IMAGE, "None"])
else:
if len(cpmeas.find_metadata_tokens(setting_values[1])):
new_setting_values.extend([PC_WITH_METADATA,
setting_values[4]])
else:
new_setting_values.extend([PC_CUSTOM, setting_values[4]])
new_setting_values.extend(setting_values[5:11])
#
# Last value is there just to display some text in Matlab
#
new_setting_values.extend(setting_values[12:-1])
setting_values = new_setting_values
from_matlab = False
variable_revision_number = 1
##########################
#
# Version 2
#
##########################
if not from_matlab and variable_revision_number == 1:
# The logic of the question about overwriting was reversed.
if setting_values[11] == cps.YES:
setting_values[11] = cps.NO
else:
setting_values[11] = cps.YES
variable_revision_number = 2
#########################
#
# Version 3
#
#########################
if (not from_matlab) and variable_revision_number == 2:
# Default image/output directory -> Default Image Folder
if setting_values[8].startswith("Default output"):
setting_values = (setting_values[:8] +
[PC_DEFAULT]+ setting_values[9:])
elif setting_values[8].startswith("Same"):
setting_values = (setting_values[:8] +
[PC_WITH_IMAGE] + setting_values[9:])
variable_revision_number = 3
#########################
#
# Version 4
#
#########################
if (not from_matlab) and variable_revision_number == 3:
# Changed save type from "Figure" to "Module window"
if setting_values[0] == "Figure":
setting_values[0] = IF_FIGURE
setting_values = standardize_default_folder_names(setting_values,8)
variable_revision_number = 4
#########################
#
# Version 5
#
#########################
if (not from_matlab) and variable_revision_number == 4:
save_image_or_figure, image_name, figure_name,\
file_name_method, file_image_name, \
single_file_name, file_name_suffix, file_format, \
pathname_choice, pathname, bit_depth, \
overwrite, when_to_save, \
when_to_save_movie, rescale, colormap, \
update_file_names, create_subdirectories = setting_values
pathname = SaveImagesDirectoryPath.static_join_string(
pathname_choice, pathname)
setting_values = [
save_image_or_figure, image_name, figure_name,
file_name_method, file_image_name, single_file_name,
file_name_suffix != cps.DO_NOT_USE,
file_name_suffix, file_format,
pathname, bit_depth, overwrite, when_to_save,
rescale, colormap, update_file_names, create_subdirectories]
variable_revision_number = 5
#######################
#
# Version 6
#
#######################
if (not from_matlab) and variable_revision_number == 5:
setting_values = list(setting_values)
file_name_method = setting_values[3]
single_file_name = setting_values[5]
wants_file_suffix = setting_values[6]
file_name_suffix = setting_values[7]
if file_name_method == FN_IMAGE_FILENAME_WITH_METADATA:
file_name_suffix = single_file_name
wants_file_suffix = cps.YES
file_name_method = FN_FROM_IMAGE
elif file_name_method == FN_WITH_METADATA:
file_name_method = FN_SINGLE_NAME
setting_values[3] = file_name_method
setting_values[6] = wants_file_suffix
setting_values[7] = file_name_suffix
variable_revision_number = 6
######################
#
# Version 7 - added objects
#
######################
if (not from_matlab) and (variable_revision_number == 6):
setting_values = (
setting_values[:2] + ["None"] + setting_values[2:14] +
[ GC_GRAYSCALE ] + setting_values[14:])
variable_revision_number = 7
######################
#
# Version 8 - added root_dir
#
######################
if (not from_matlab) and (variable_revision_number == 7):
setting_values = setting_values + [DEFAULT_INPUT_FOLDER_NAME]
variable_revision_number = 8
######################
#
# Version 9 - FF_TIF now outputs .tif files (go figure), so
# change FF_TIF in settings to FF_TIFF to maintain ultimate
# backwards compatibiliy.
#
######################
if (not from_matlab) and (variable_revision_number == 8):
if setting_values[9] == FF_TIF:
setting_values = setting_values[:9] + [FF_TIFF] + \
setting_values[10:]
variable_revision_number = 9
######################
#
# Version 10 - Add number of digits for sequential numbering
#
######################
if (not from_matlab) and (variable_revision_number == 9):
setting_values = setting_values[:7] + ["4"] + \
setting_values[7:]
variable_revision_number = 10
######################
#
# Version 11 - Allow selection of movie format
#
######################
if (not from_matlab) and (variable_revision_number == 10):
setting_values = setting_values + [ FF_AVI ]
variable_revision_number = 11
######################
#
# Version 11.5 - name of bit depth changed
# (can fix w/o version change)
#
######################
if variable_revision_number == 11:
bit_depth = setting_values[OFFSET_BIT_DEPTH_V11]
bit_depth = {
OLD_BIT_DEPTH_8:BIT_DEPTH_8,
OLD_BIT_DEPTH_16:BIT_DEPTH_16 }.get(bit_depth, bit_depth)
setting_values = setting_values[:OFFSET_BIT_DEPTH_V11] + \
[bit_depth] + setting_values[OFFSET_BIT_DEPTH_V11+1:]
setting_values[OFFSET_DIRECTORY_PATH] = \
SaveImagesDirectoryPath.upgrade_setting(setting_values[OFFSET_DIRECTORY_PATH])
return setting_values, variable_revision_number, from_matlab
def validate_module(self, pipeline):
if (self.save_image_or_figure in (IF_IMAGE, IF_MASK, IF_CROPPING) and
self.when_to_save in (WS_FIRST_CYCLE, WS_EVERY_CYCLE)):
#
# Make sure that the image name is available on every cycle
#
for setting in cps.get_name_providers(pipeline,
self.image_name):
if setting.provided_attributes.get(cps.AVAILABLE_ON_LAST_ATTRIBUTE):
#
# If we fell through, then you can only save on the last cycle
#
raise cps.ValidationError("%s is only available after processing all images in an image group" %
self.image_name.value,
self.when_to_save)
# XXX - should check that if file_name_method is
# FN_FROM_IMAGE, that the named image actually has the
# required path measurement
# Make sure metadata tags exist
if self.file_name_method == FN_SINGLE_NAME or \
(self.file_name_method == FN_FROM_IMAGE and self.wants_file_name_suffix.value):
text_str = self.single_file_name.value if self.file_name_method == FN_SINGLE_NAME else self.file_name_suffix.value
undefined_tags = pipeline.get_undefined_metadata_tags(text_str)
if len(undefined_tags) > 0:
raise cps.ValidationError("%s is not a defined metadata tag. Check the metadata specifications in your load modules" %
undefined_tags[0],
self.single_file_name if self.file_name_method == FN_SINGLE_NAME else self.file_name_suffix)
class SaveImagesDirectoryPath(cps.DirectoryPath):
'''A specialized version of DirectoryPath to handle saving in the image dir'''
def __init__(self, text, file_image_name, doc):
'''Constructor
text - explanatory text to display
file_image_name - the file_image_name setting so we can save in same dir
doc - documentation for user
'''
super(SaveImagesDirectoryPath, self).__init__(
text, dir_choices = [
cps.DEFAULT_OUTPUT_FOLDER_NAME, cps.DEFAULT_INPUT_FOLDER_NAME,
PC_WITH_IMAGE, cps.ABSOLUTE_FOLDER_NAME,
cps.DEFAULT_OUTPUT_SUBFOLDER_NAME,
cps.DEFAULT_INPUT_SUBFOLDER_NAME], doc=doc)
self.file_image_name = file_image_name
def get_absolute_path(self, measurements=None, image_set_index=None):
if self.dir_choice == PC_WITH_IMAGE:
path_name_feature = "PathName_%s" % self.file_image_name.value
return measurements.get_current_image_measurement(path_name_feature)
return super(SaveImagesDirectoryPath, self).get_absolute_path(
measurements, image_set_index)
def test_valid(self, pipeline):
if self.dir_choice not in self.dir_choices:
raise cps.ValidationError("%s is not a valid directory option" %
self.dir_choice, self)
@staticmethod
def upgrade_setting(value):
'''Upgrade setting from previous version'''
dir_choice, custom_path = cps.DirectoryPath.split_string(value)
if dir_choice in OLD_PC_WITH_IMAGE_VALUES:
dir_choice = PC_WITH_IMAGE
elif dir_choice in (PC_CUSTOM, PC_WITH_METADATA):
if custom_path.startswith('.'):
dir_choice = cps.DEFAULT_OUTPUT_SUBFOLDER_NAME
elif custom_path.startswith('&'):
dir_choice = cps.DEFAULT_INPUT_SUBFOLDER_NAME
custom_path = '.' + custom_path[1:]
else:
dir_choice = cps.ABSOLUTE_FOLDER_NAME
else:
return cps.DirectoryPath.upgrade_setting(value)
return cps.DirectoryPath.static_join_string(dir_choice, custom_path)
def save_bmp(path, img):
'''Save an image as a Microsoft .bmp file
path - path to file to save
img - either a 2d, uint8 image or a 2d + 3 plane uint8 RGB color image
Saves file as an uncompressed 8-bit or 24-bit .bmp image
'''
#
# Details from
# http://en.wikipedia.org/wiki/BMP_file_format#cite_note-DIBHeaderTypes-3
#
# BITMAPFILEHEADER
# http://msdn.microsoft.com/en-us/library/dd183374(v=vs.85).aspx
#
# BITMAPINFOHEADER
# http://msdn.microsoft.com/en-us/library/dd183376(v=vs.85).aspx
#
BITMAPINFOHEADER_SIZE = 40
img = img.astype(np.uint8)
w = img.shape[1]
h = img.shape[0]
#
# Convert RGB to interleaved
#
if img.ndim == 3:
rgb = True
#
# Compute padded raster length
#
raster_length = (w * 3 + 3) & ~ 3
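        # (Added note) rows are padded to a 4-byte boundary, e.g. w=5 gives 15 data
        # bytes per row, padded to raster_length=16.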
tmp = np.zeros((h, raster_length), np.uint8)
        #
        # BMP stores pixel data in BGR order, so swap the channel order here
        #
tmp[:, 2:(w*3):3] = img[:, :, 0]
tmp[:, 1:(w*3):3] = img[:, :, 1]
tmp[:, 0:(w*3):3] = img[:, :, 2]
img = tmp
else:
rgb = False
if w % 4 != 0:
raster_length = (w + 3) & ~ 3
tmp = np.zeros((h, raster_length), np.uint8)
tmp[:, :w] = img
img = tmp
#
# The image is upside-down in .BMP
#
bmp = np.ascontiguousarray(np.flipud(img)).data
with open(path, "wb") as fd:
def write2(value):
'''write a two-byte little-endian value to the file'''
fd.write(np.array([value], "<u2").data[:2])
def write4(value):
'''write a four-byte little-endian value to the file'''
fd.write(np.array([value], "<u4").data[:4])
#
# Bitmap file header (1st pass)
# byte
# 0-1 = "BM"
# 2-5 = length of file
# 6-9 = 0
# 10-13 = offset from beginning of file to bitmap bits
fd.write("BM")
length = 14 # BITMAPFILEHEADER
length += BITMAPINFOHEADER_SIZE
if not rgb:
length += 4 * 256 # 256 color table entries
hdr_length = length
length += len(bmp)
write4(length)
write4(0)
write4(hdr_length)
#
# BITMAPINFOHEADER
#
write4(BITMAPINFOHEADER_SIZE) # biSize
write4(w) # biWidth
write4(h) # biHeight
write2(1) # biPlanes = 1
write2(24 if rgb else 8) # biBitCount
write4(0) # biCompression = BI_RGB
write4(len(bmp)) # biSizeImage
write4(7200) # biXPelsPerMeter
write4(7200) # biYPelsPerMeter
        write4(0 if rgb else 256) # biClrUsed (0 = no palette for 24-bit RGB; 256 entries for 8-bit)
write4(0) # biClrImportant
if not rgb:
# The color table
color_table = np.column_stack(
[np.arange(256)]* 3 +
[np.zeros(256, np.uint32)]).astype(np.uint8)
fd.write(np.ascontiguousarray(color_table, np.uint8).data)
fd.write(bmp)
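# A minimal usage sketch (added for illustration; not part of the original module).
# It writes an 8-bit grayscale ramp with save_bmp(); the output path is illustrative,
# and the __main__ guard keeps it from running when CellProfiler imports this module.
if __name__ == "__main__":
    import tempfile
    demo_img = np.tile(np.arange(256, dtype=np.uint8), (64, 1))  # 64x256 gradient
    demo_path = os.path.join(tempfile.gettempdir(), "saveimages_demo.bmp")
    save_bmp(demo_path, demo_img)
    print("Wrote demo bitmap to %s" % demo_path)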
|
gpl-2.0
|
TueVJ/RE-EU_datavis
|
main_no_background.py
|
1
|
8325
|
#! /usr/bin/env python
# coding=utf8
"""
main_no_background.py: Start visualization, no background.
This script runs a visualization of the electricity prices over Europe
for the period of April 2014, with controls to increase the installed wind and solar capacity,
and change scenarios for the rest of the system.
Wind and solar penetration are relative numbers (0-150%), with 100% corresponding to the scenario
where the average gross production of renewables matches average demand. The installed capacities
are 2015 numbers.
Some commented out code refers to wind and solar backgrounds, which cannot be distributed due to
licensing issues. Sorry.
"""
import matplotlib
matplotlib.use('Qt4Agg')
matplotlib.rcParams['toolbar'] = 'None'
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import mpl_toolkits.basemap as bm
import defaults
from matplotlib import animation
from matplotlib.widgets import Slider, RadioButtons, Button
from helper_funcs import DiscreteSlider
from plot_classes import Production_Consumption_Plot, WindMap, Network_Plot, Pieplots, Priceplot
sns.set_style('ticks')
__author__ = "Tue V. Jensen"
__copyright__ = "Copyright 2016"
__credits__ = ["Tue V. Jensen"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Tue V. Jensen"
__email__ = "[email protected]"
__status__ = "Eternal Prototype"
class expando(object):
pass
###
# Setup
###
NUM_TIMESTEPS = defaults.NUM_TIMESTEPS
MINWIND = 0
MAXWIND = 20
WIND_LEVELS = 21
MINPRICE = -10
MAXPRICE = 10
WIND_CAP = 1200. # GW of wind capacity at 100\% penetration
WIND_TODAY = 142. # Installed wind capacity today
SOLAR_CAP = 1048. # GW of solar capacity at 100\% penetration
SOLAR_TODAY = 95. # Installed solar capacity today
class formatspec:
def __init__(self, baseval, valfmt='{0:.00f} GW ({2:.00f}%)\n ({1:.00f}% vs today)', valtoday=1):
self.baseval = baseval
self.valfmt = valfmt
self.valtoday = valtoday
def format(self, x):
return self.valfmt.format(self.baseval*x, 100.*x*self.baseval/self.valtoday, 100*x)
def __mod__(self, x):
# return self.format(x)
return ''
WIND_SETTINGS = np.linspace(0, 1, 11)
SOLAR_SETTINGS = np.linspace(0, 1, 11)
wind_formatspec = formatspec(WIND_CAP, valtoday=WIND_TODAY)
solar_formatspec = formatspec(SOLAR_CAP, valtoday=SOLAR_TODAY)
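# Illustration (added; not in the original script): formatspec turns a relative
# penetration level into the slider side-label, e.g. wind_formatspec.format(0.5)
# gives roughly "600 GW (50%)" / "(423% vs today)" with WIND_CAP=1200, WIND_TODAY=142.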
###
# Plots
###
mymap = bm.Basemap(
projection='cyl',
llcrnrlat=defaults.LLCRNRLAT, llcrnrlon=defaults.LLCRNRLON,
urcrnrlat=defaults.URCRNRLAT, urcrnrlon=defaults.URCRNRLON,
resolution='l')
fig = plt.figure(figsize=(16, 9), dpi=80)
fig.patch.set_facecolor('white')
# Main map
ax1 = plt.subplot2grid((9, 6), (0, 0), colspan=4, rowspan=6)
contourholder = expando()
windticks = np.linspace(MINWIND, MAXWIND, WIND_LEVELS)
# windcontour = WindMap(ax1)
networkplot = Network_Plot(ax1)
pricecb = plt.colorbar(networkplot.nodeplot, ax=ax1, orientation='vertical', pad=0.05, aspect=30, extend='both', format='%.1f')
pricecb.set_label(U'Electricity price [€/MWh]')
coastlines = mymap.drawcoastlines(ax=ax1)
coastlines.set_alpha(0.5)
coastlines.set_zorder(10)
# Price in DK
ax2 = plt.subplot2grid((9, 6), (0, 4), rowspan=3, colspan=2)
thePriceplot = Priceplot(ax2)
# ax2.set_xlabel(u'Renewables in Europe [MW]')
ax2.set_ylabel(u'Mean European Price [€/MWh]')
ax2.set_ylim((defaults.MINPRICE, defaults.MAXPRICE*1.25))
sns.despine(ax=ax2, offset=3)
# Solar/wind use
ax3 = plt.subplot2grid((9, 6), (3, 4), rowspan=3, colspan=2)
ProdConPlot = Production_Consumption_Plot(ax3)
ax3.set_ylabel(u'Production/consumption [MWh]')
sns.despine(ax=ax3, offset=3)
# Renewable use
ax6 = plt.subplot2grid((9, 6), (6, 2), rowspan=3, colspan=4)
ax6.set_aspect(1)
ax6.axis('off')
pp = Pieplots(ax6)
plt.tight_layout()
###
# Controls
###
r = fig.canvas.get_renderer()
def wind_slider_change(*args, **kwargs):
networkplot.update_wind(*args, **kwargs)
ProdConPlot.update_wind(*args, **kwargs)
thePriceplot.update_wind(*args, **kwargs)
pp.update_wind(*args, **kwargs)
for a in pp.get_artists():
ax6.draw_artist(a)
fig.canvas.blit(ax6.bbox)
fig.canvas.blit(wind_slider_ax.bbox)
wind_slider_text.set_text(wind_formatspec.format(wind_slider.discrete_val))
wind_slider_text_ax.draw_artist(wind_slider_text)
fig.canvas.blit(wind_slider_text_ax.bbox)
def solar_slider_change(*args, **kwargs):
networkplot.update_solar(*args, **kwargs)
ProdConPlot.update_solar(*args, **kwargs)
thePriceplot.update_solar(*args, **kwargs)
pp.update_solar(*args, **kwargs)
for a in pp.get_artists():
ax6.draw_artist(a)
fig.canvas.blit(ax6.bbox)
fig.canvas.blit(solar_slider_ax.bbox)
solar_slider_text.set_text(solar_formatspec.format(solar_slider.discrete_val))
solar_slider_text_ax.draw_artist(solar_slider_text)
fig.canvas.blit(solar_slider_text_ax.bbox)
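# Note (added comment): the slider callbacks above redraw only the affected artists and
# blit their axes' bounding boxes, so moving a slider updates those regions without
# forcing a full-figure redraw while the animation below is running.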
wind_slider_ax = plt.axes([0.08, 2.0/9, 1./3-0.16, 0.04])
wind_slider = DiscreteSlider(wind_slider_ax, 'Installed Wind', 0.0, 1.5, valinit=0.0, increment=0.1, valfmt=wind_formatspec, facecolor=sns.xkcd_rgb['sky blue'], dragging=True)
wind_slider.on_changed(wind_slider_change)
# wind_slider.valtext.set_bbox(dict(facecolor='white'))
wind_slider_text_ax = plt.axes([1./3-0.07, 2.0/9, 0.1, 0.04])
wind_slider_text_ax.axis('off')
wind_slider_text = wind_slider_text_ax.text(
0.01, 0.02, wind_formatspec.format(wind_slider.discrete_val),
verticalalignment='bottom', horizontalalignment='left',
transform=wind_slider_text_ax.transAxes,
color='black', fontsize=12, bbox=dict(facecolor='white'))
solar_slider_ax = plt.axes([0.08, 1.4/9, 1./3-0.16, 0.04])
solar_slider = DiscreteSlider(solar_slider_ax, 'Installed Solar', 0.0, 0.5, valinit=0.0, increment=0.05, valfmt=solar_formatspec, facecolor=sns.xkcd_rgb['pale yellow'], dragging=True)
solar_slider.on_changed(solar_slider_change)
# solar_slider.valtext.set_bbox(dict(facecolor='white'))
solar_slider_text_ax = plt.axes([1./3-0.07, 1.4/9, 0.1, 0.04])
solar_slider_text_ax.axis('off')
solar_slider_text = solar_slider_text_ax.text(
0.01, 0.02, solar_formatspec.format(solar_slider.discrete_val),
verticalalignment='bottom', horizontalalignment='left',
transform=solar_slider_text_ax.transAxes,
color='black', fontsize=12, bbox=dict(facecolor='white'))
scenario_dict = {
'Today\'s system': 'base',
'Nuclear is shut down': 'nuclear',
'Demand increases by 15\%': 'demandincrease'
}
scenario_list = [
'Today\'s system',
'Nuclear is shut down',
# u'CO2 price at 100 €/Ton',
# 'Gas and Oil at 3x today\'s price',
'Demand increases by 15\%'
]
scenario_select_ax = plt.axes([0.005, 0.1/9, 1./6, 1.1/9], aspect='equal', frameon=False)
scenario_select_radio = RadioButtons(scenario_select_ax, scenario_list, activecolor=sns.xkcd_rgb['dark grey'])
def scenario_change(val):
newscen = scenario_dict[val]
networkplot.update_scenario(newscen)
ProdConPlot.update_scenario(newscen)
thePriceplot.update_scenario(newscen)
pp.update_scenario(newscen)
for a in pp.get_artists():
ax6.draw_artist(a)
fig.canvas.blit(ax6.bbox)
    fig.canvas.blit(scenario_select_ax.bbox)
scenario_select_radio.on_clicked(scenario_change)
bg_list = ['Plot Wind', 'Plot Solar', 'Leave Blank']
bg_dict = {
'Plot Wind': 'wind',
'Plot Solar': 'solar',
'Leave Blank': 'blank'}
# def set_plot_background(val):
# windcontour.set_bg(bg_dict[val])
# set_plot_bg_ax = plt.axes([0.05+1./6, 0.1/9, 1./6, 1.1/9], aspect='equal', frameon=False)
# set_plot_bg_radio = RadioButtons(set_plot_bg_ax, bg_list, activecolor=sns.xkcd_rgb['dark grey'])
# set_plot_bg_radio.on_clicked(set_plot_background)
###
# Animated areas controlled here
###
def init():
pass
def animate(i):
# windout = windcontour.animate(i)
ProdConPlot.animate(i)
netout = networkplot.animate(i)
thePriceplot.animate(i)
return ProdConPlot.areas + [ProdConPlot.curtime_line] + \
[coastlines] + netout + thePriceplot.areas + thePriceplot.lines + [thePriceplot.curtime_line] # windout + \
ani = animation.FuncAnimation(fig, animate, frames=NUM_TIMESTEPS, interval=100, repeat=True, repeat_delay=1000, blit=True)
# global animate
# animate = True
plt.show()
|
mit
|
maxisi/gwsumm
|
gwsumm/plot/segments.py
|
1
|
49854
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWSumm.
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>.
"""Definitions for the standard plots
"""
from __future__ import division
import hashlib
import bisect
from itertools import cycle
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from dateutil.relativedelta import relativedelta
from matplotlib.patches import Rectangle
import numpy  # needed for numpy.zeros / numpy.asarray used below
from gwpy.plotter import *
from gwpy.plotter.tex import label_to_latex
from gwpy.time import (from_gps, to_gps)
from .. import (globalv, mode, version)
from ..config import NoOptionError
from ..utils import (re_quote, get_odc_bitmask, re_flagdiv)
from ..data import (get_channel, get_timeseries)
from ..segments import (get_segments, format_padding)
from ..state import ALLSTATE
from .core import (BarPlot, PiePlot)
from .registry import (get_plot, register_plot)
from .mixins import *
__author__ = 'Duncan Macleod <[email protected]>'
__version__ = version.version
TimeSeriesDataPlot = get_plot('timeseries')
GREEN = (0.2, 0.8, 0.2)
class SegmentDataPlot(SegmentLabelSvgMixin, TimeSeriesDataPlot):
"""Segment plot of one or more `DataQualityFlags <DataQualityFlag>`.
"""
type = 'segments'
data = 'segments'
defaults = {'mask': None,
'color': None,
'on_is_bad': False,
'insetlabels': 'inset',
'edgecolor': 'black',
'legend-bbox_to_anchor': (1.01, 1.),
'legend-loc': 'upper left',
'legend-borderaxespad': 0,
'legend-fontsize': 12}
def __init__(self, flags, start, end, state=None, outdir='.', **kwargs):
padding = kwargs.pop('padding', None)
super(SegmentDataPlot, self).__init__([], start, end, state=state,
outdir=outdir, **kwargs)
self._allflags = []
self.flags = flags
self.preview_labels = False
self.padding = padding
def get_channel_groups(self, *args, **kwargs):
return [(f, [f]) for f in self.flags]
@property
def flags(self):
return [f.name for f in self._flags]
@flags.setter
def flags(self, flist):
if isinstance(flist, str):
flist = [f.strip('\n ') for f in flist.split(',')]
self._flags = []
for f in flist:
self.add_flag(f)
def add_flag(self, f):
# append flag to main list
if isinstance(f, DataQualityFlag):
self._flags.append(f)
else:
self._flags.append(DataQualityFlag(f))
# append raw flags to 'allflags' property
flags = re_flagdiv.split(str(f))[::2]
for f in flags:
if not f:
continue
self._allflags.append(DataQualityFlag(f))
@property
def allflags(self):
return [f.name for f in self._allflags]
@property
def padding(self):
return OrderedDict((f.name, f.padding) for f in self._allflags)
@padding.setter
def padding(self, pad):
for f, p in format_padding(self._allflags, pad).iteritems():
if isinstance(p, (float, int)):
f.padding = (p, p)
else:
f.padding = p
@property
def ifos(self):
"""Interferometer set for this `SegmentDataPlot`
"""
return set([f.strip('!&-_')[:2] for f in self.allflags])
@property
def pid(self):
"""File pid for this `DataPlot`.
"""
try:
return self._pid
except AttributeError:
self._pid = hashlib.md5(
"".join(map(str, self.flags))).hexdigest()[:6]
return self.pid
@pid.setter
def pid(self, id_):
self._pid = str(id_)
@classmethod
def from_ini(cls, config, section, start, end, flags=None, state=ALLSTATE,
**kwargs):
# get padding
try:
kwargs.setdefault(
'padding', config.get(section, 'padding'))
except NoOptionError:
pass
if 'padding' in kwargs:
kwargs['padding'] = list(eval(kwargs['padding']))
# build figure
new = super(SegmentDataPlot, cls).from_ini(config, section, start,
end, state=state, **kwargs)
# get flags
if flags is None:
flags = dict(config.items(section)).pop('flags', [])
if isinstance(flags, str):
flags = [f.strip('\n ') for f in flags.split(',')]
new.flags = flags
return new
def get_segment_color(self):
"""Parse the configured ``pargs`` and determine the colors for
active and valid segments.
"""
active = self.pargs.pop('active', None)
known = self.pargs.pop('known', 'undefined')
# both defined by user
        if active is not None and known != 'undefined':
return active, known
# only active defined by user
elif isinstance(active, str) and active.lower() != 'red':
return active, 'red'
elif active is not None:
return active, 'blue'
# only known defined by user
elif known not in ['undefined', None, GREEN, 'green', 'g']:
return GREEN, known
elif known != 'undefined':
return 'blue', known
else:
onisbad = bool(self.pargs.pop('on_is_bad', True))
if onisbad:
return 'red', GREEN
else:
return GREEN, 'red'
def process(self):
# get labelsize
_labelsize = rcParams['ytick.labelsize']
labelsize = self.pargs.pop('labelsize', 12)
if self.pargs.get('insetlabels', True) is False:
rcParams['ytick.labelsize'] = labelsize
# create figure
(plot, axes) = self.init_plot(plot=SegmentPlot)
ax = axes[0]
# extract plotting arguments
legendargs = self.parse_legend_kwargs()
mask = self.pargs.pop('mask')
activecolor, validcolor = self.get_segment_color()
if isinstance(activecolor, dict):
self.pargs.update(activecolor)
else:
self.pargs['facecolor'] = activecolor
plotargs = self.parse_plot_kwargs()
for i, kwdict in enumerate(plotargs):
if isinstance(validcolor, dict) or validcolor is None:
kwdict['known'] = validcolor
elif (validcolor is None or isinstance(validcolor, str) or
isinstance(validcolor[0], (float, int))):
kwdict['known'] = {'facecolor': validcolor}
else:
kwdict['known'] = {'facecolor': validcolor[i]}
legcolors = plotargs[0].copy()
# plot segments
for i, (flag, pargs) in enumerate(
zip(self.flags, plotargs)[::-1]):
label = re_quote.sub('', pargs.pop('label', str(flag)))
if (self.fileformat == 'svg' and not str(flag) in label and
ax.get_insetlabels()):
label = '%s [%s]' % (label, str(flag))
elif self.fileformat == 'svg' and not str(flag) in label:
label = '[%s] %s' % (label, str(flag))
if self.state and not self.all_data:
valid = self.state.active
else:
valid = SegmentList([self.span])
segs = get_segments(flag, validity=valid, query=False,
padding=self.padding).coalesce()
ax.plot(segs, y=i, label=label, **pargs)
# make custom legend
known = legcolors.pop('known', None)
if known:
active = legcolors.pop('facecolor')
edgecolor = legcolors.pop('edgecolor')
epoch = ax.get_epoch()
xlim = ax.get_xlim()
seg = SegmentList([Segment(self.start - 10, self.start - 9)])
v = ax.plot(seg, facecolor=known['facecolor'],
collection=False)[0][0]
a = ax.plot(seg, facecolor=active, edgecolor=edgecolor,
collection=False)[0][0]
if edgecolor not in [None, 'none']:
t = ax.plot(seg, facecolor=edgecolor, collection=False)[0][0]
ax.legend([v, a, t], ['Known', 'Active', 'Transition'],
**legendargs)
else:
ax.legend([v, a], ['Known', 'Active'], **legendargs)
ax.set_epoch(epoch)
ax.set_xlim(*xlim)
# customise plot
for key, val in self.pargs.iteritems():
try:
getattr(ax, 'set_%s' % key)(val)
except AttributeError:
setattr(ax, key, val)
if 'ylim' not in self.pargs:
ax.set_ylim(-0.5, len(self.flags) - 0.5)
# add bit mask axes and finalise
if mask is None and not plot.colorbars:
plot.add_colorbar(ax=ax, visible=False)
elif mask is not None:
plot.add_bitmask(mask, topdown=True)
if self.state and self.state.name != ALLSTATE:
self.add_state_segments(ax)
rcParams['ytick.labelsize'] = _labelsize
return self.finalize()
register_plot(SegmentDataPlot)
class StateVectorDataPlot(TimeSeriesDataPlot):
"""DataPlot of some `StateVector` data.
While technically a sub-class of the `TimeSeriesDataPlot`, for
data access and processing reasons, the output shadows that of the
`SegmentDataPlot` more closely.
"""
type = 'statevector'
data = 'statevector'
defaults = SegmentDataPlot.defaults.copy()
# copy from SegmentDataPlot
flag = property(fget=SegmentDataPlot.flags.__get__,
fset=SegmentDataPlot.flags.__set__,
fdel=SegmentDataPlot.flags.__delete__,
doc="""List of flags generated for this
`StateVectorDataPlot`.""")
get_segment_color = SegmentDataPlot.__dict__['get_segment_color']
def __init__(self, *args, **kwargs):
super(StateVectorDataPlot, self).__init__(*args, **kwargs)
self.flags = []
@property
def pid(self):
try:
return self._pid
except:
chans = "".join(map(str, self.channels))
self._pid = hashlib.md5(chans).hexdigest()[:6]
if self.pargs.get('bits', None):
self._pid = hashlib.md5(
self._pid + str(self.pargs['bits'])).hexdigest()[:6]
return self.pid
def _parse_labels(self, defaults=[]):
"""Pop the labels for plotting from the `pargs` for this Plot
        This method overrides the `TimeSeriesDataPlot` implementation in order
        to set the bit names from the various channels as the defaults
        instead of the channel names.
"""
chans = zip(*self.get_channel_groups())[0]
labels = list(self.pargs.pop('labels', defaults))
if isinstance(labels, (unicode, str)):
labels = labels.split(',')
for i, l in enumerate(labels):
if isinstance(l, (list, tuple)):
labels[i] = list(labels[i])
for j, l2 in enumerate(l):
labels[i][j] = rUNDERSCORE.sub(r'\_', str(l2).strip('\n '))
elif isinstance(l, str):
labels[i] = rUNDERSCORE.sub(r'\_', str(l).strip('\n '))
while len(labels) < len(chans):
labels.append(None)
return labels
def process(self):
# make font size smaller
_labelsize = rcParams['ytick.labelsize']
labelsize = self.pargs.pop('labelsize', 12)
if self.pargs.get('insetlabels', True) is False:
rcParams['ytick.labelsize'] = labelsize
(plot, axes) = self.init_plot(plot=SegmentPlot)
ax = axes[0]
# get bit setting
bits = self.pargs.pop('bits', None)
if bits and len(self.channels) > 1:
raise ValueError("Specifying 'bits' doesn't work for a "
"state-vector plot including multiple channels")
# extract plotting arguments
mask = self.pargs.pop('mask')
ax.set_insetlabels(self.pargs.pop('insetlabels', True))
activecolor, validcolor = self.get_segment_color()
edgecolor = self.pargs.pop('edgecolor')
plotargs = {'facecolor': activecolor,
'edgecolor': edgecolor}
if isinstance(validcolor, dict):
plotargs['known'] = validcolor
elif (validcolor is None or isinstance(validcolor, str) or
isinstance(validcolor[0], (float, int))):
plotargs['known'] = {'facecolor': validcolor}
else:
plotargs['known'] = {'facecolor': validcolor[i]}
extraargs = self.parse_plot_kwargs()
# plot segments
nflags = 0
for channel, pargs in zip(self.channels[::-1], extraargs[::-1]):
if self.state and not self.all_data:
valid = self.state.active
else:
valid = SegmentList([self.span])
channel = get_channel(channel)
if bits:
bits_ = [x if i in bits else None for
(i, x) in enumerate(channel.bits)]
else:
bits_ = channel.bits
data = get_timeseries(str(channel), valid, query=False,
statevector=True)
flags = None
for stateseries in data:
if not stateseries.size:
stateseries.epoch = self.start
stateseries.dx = 0
if channel.sample_rate is not None:
stateseries.sample_rate = channel.sample_rate
stateseries.bits = bits_
if not 'int' in str(stateseries.dtype):
stateseries = stateseries.astype('uint32')
newflags = stateseries.to_dqflags().values()
if flags is None:
flags = newflags
else:
for i, flag in enumerate(newflags):
flags[i] += flag
nflags += len([m for m in bits_ if m is not None])
labels = pargs.pop('label', [None]*len(flags))
if isinstance(labels, str):
labels = [labels]
while len(labels) < len(flags):
labels.append(None)
for flag, label in zip(flags, labels)[::-1]:
kwargs = pargs.copy()
kwargs.update(plotargs)
if label is not None:
kwargs['label'] = label
ax.plot(flag, **kwargs)
# customise plot
for key, val in self.pargs.iteritems():
try:
getattr(ax, 'set_%s' % key)(val)
except AttributeError:
setattr(ax, key, val)
if 'ylim' not in self.pargs:
ax.set_ylim(-0.5, nflags - 0.5)
# add bit mask axes and finalise
if mask is None and not plot.colorbars:
plot.add_colorbar(ax=ax, visible=False)
elif mask is not None:
plot.add_bitmask(mask, topdown=True)
if self.state and self.state.name != ALLSTATE:
self.add_state_segments(ax)
# reset tick size and return
rcParams['ytick.labelsize'] = _labelsize
return self.finalize()
register_plot(StateVectorDataPlot)
class DutyDataPlot(SegmentDataPlot):
"""`DataPlot` of the duty-factor for a `SegmentList`
"""
type = 'duty'
data = 'segments'
defaults = {'alpha': 0.8,
'sep': False,
'side_by_side': False,
'normalized': None,
'cumulative': False,
'stacked': False,
'ylabel': r'Duty factor [\%]'}
def __init__(self, flags, start, end, state=None, outdir='.',
bins=None, **kwargs):
kwargs.setdefault('fileformat', 'png')
super(DutyDataPlot, self).__init__(flags, start, end, state=state,
outdir=outdir, **kwargs)
self.bins = bins
@property
def pid(self):
try:
return self._pid
except:
super(DutyDataPlot, self).pid
if self.pargs.get('cumulative', False):
self._pid += '_CUMULATIVE'
return self.pid
@pid.setter
def pid(self, p):
self._pid = p
def get_bins(self):
"""Work out the correct histogram binning for this `DutyDataPlot`
"""
# if not given anything, work it out from the mode
if self.bins is None:
m = mode.MODE_NAME[mode.get_mode()]
duration = float(abs(self.span))
# for year mode, use a month
if m in ['YEAR'] or duration >= 86400 * 300:
dt = relativedelta(months=1)
# for more than 8 weeks, use weeks
elif duration >= 86400 * 7 * 8:
dt = relativedelta(weeks=1)
# for week and month mode, use daily
elif m in ['WEEK', 'MONTH'] or duration >= 86400 * 7:
dt = relativedelta(days=1)
# for day mode, make hourly duty factor
elif m in ['DAY']:
dt = relativedelta(hours=1)
# otherwise provide 10 bins
else:
dt = relativedelta(seconds=float(abs(self.span))/10.)
# if given a float, assume this is the bin size
elif isinstance(self.bins, (float, int)):
dt = relativedelta(seconds=self.bins)
# if we don't have a list, we must have worked out dt
if not isinstance(self.bins, (list, tuple, numpy.ndarray)):
self.bins = []
s = from_gps(self.start)
e = from_gps(self.end)
while s < e:
t = int(to_gps(s + dt) - to_gps(s))
self.bins.append(t)
s += dt
self.bins = numpy.asarray(self.bins)
return self.bins
def calculate_duty_factor(self, segments, bins=None, cumulative=False,
normalized=None):
if normalized is None and cumulative:
normalized = False
elif normalized is None:
normalized = 'percent'
if normalized == 'percent':
normalized = 100.
else:
normalized = float(normalized)
if not bins:
bins = self.get_bins()
if isinstance(segments, DataQualityFlag):
segments = segments.known & segments.active
duty = numpy.zeros(len(bins))
mean = numpy.zeros(len(bins))
for i in range(len(bins)):
bin = SegmentList([Segment(self.start + sum(bins[:i]),
self.start + sum(bins[:i+1]))])
d = float(abs(segments & bin))
if normalized:
d *= normalized / float(bins[i])
duty[i] = d
mean[i] = duty[:i+1].mean()
if cumulative:
duty = duty.cumsum()
return duty, mean
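    # Illustrative note (added): with hourly bins (3600 s) and normalized='percent',
    # a bin containing 1800 s of active segments gives duty = 1800 * 100 / 3600 = 50.0,
    # and mean[i] is the running average of the duty factor over bins 0..i.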
def process(self, outputfile=None):
sep = self.pargs.pop('sep', False)
if sep:
if self.pargs.get('side_by_side'):
raise ValueError('DutyDataPlot parameters \'sep\' and '
'\'side_by_side\' should not be used together')
geometry = (len(self.flags), 1)
else:
geometry = (1, 1)
(plot, axes) = self.init_plot(plot=TimeSeriesPlot, geometry=geometry)
# extract plotting arguments
style = self.pargs.pop('style', 'bar')
stacked = self.pargs.pop('stacked', False)
sidebyside = self.pargs.pop('side_by_side', False)
normalized = self.pargs.pop('normalized', True)
cumulative = self.pargs.pop('cumulative', False)
if normalized is None and not cumulative:
normalized = 'percent'
plotargs = self.parse_plot_kwargs()
legendargs = self.parse_legend_kwargs()
if sep:
legendargs.setdefault('loc', 'upper left')
legendargs.setdefault('bbox_to_anchor', (1.01, 1))
legendargs.setdefault('borderaxespad', 0)
rollingmean = self.pargs.pop('rolling_mean',
not stacked and not cumulative)
# work out times and plot mean for legend
self.get_bins()
times = float(self.start) + numpy.concatenate(
([0], self.bins[:-1].cumsum()))
now = bisect.bisect_left(times, globalv.NOW)
if rollingmean:
axes[0].plot(times[:1], [-1], 'k--', label='Rolling mean')
# get bar parameters
try:
bottom = self.pargs['ylim'][0]
except KeyError:
bottom = 0
bottom = numpy.zeros(times.size) + bottom
# plot segments
if self.state and not self.all_data:
valid = self.state.active
else:
valid = SegmentList([self.span])
for i, (ax, flag, pargs, color) in enumerate(
zip(cycle(axes), self.flags, plotargs,
cycle(rcParams['axes.color_cycle']))):
# get segments
segs = get_segments(flag, validity=valid, query=False,
padding=self.padding)
duty, mean = self.calculate_duty_factor(
segs, normalized=normalized, cumulative=cumulative)
# plot duty cycle
if sep and pargs.get('label') == flag.replace('_', r'\_'):
pargs.pop('label', None)
elif 'label' in pargs and normalized == 'percent' and not stacked:
if legendargs.get('loc', None) in ['upper left', 2]:
pargs['label'] = pargs['label'] + '\n[%.1f\\%%]' % mean[-1]
else:
pargs['label'] = pargs['label'] + r' [%.1f\%%]' % mean[-1]
color = pargs.pop('color', color)
# plot in relevant style
if style == 'line':
lineargs = pargs.copy()
lineargs.setdefault('drawstyle', 'steps-post')
ax.plot(times[:now], duty[:now], color=color, **lineargs)
elif style not in ['bar', 'fill']:
raise ValueError("Cannot display %s with style=%r"
% (type(self).__name__, style))
else:
# work out positions
if sidebyside:
pad = .1
x = 1 - pad * 2
w = pargs.pop('width', 1.) * x / len(self.flags)
offset = pad + x/len(self.flags) * (i + 1/2.)
print(w, offset)
elif stacked:
offset = .5
w = pargs.pop('width', .9)
else:
offset = .5
w = pargs.pop('width', 1.)
width = w * self.bins[:now]
if stacked:
height = duty
pargs.setdefault('edgecolor', color)
else:
height = duty - bottom
if style == 'fill':
width = self.bins[:now]
ec = pargs.pop('edgecolor', 'black')
pargs['edgecolor'] = 'none'
lw = pargs.pop('linewidth', 1)
pargs['linewidth'] = 0
b = ax.bar(times[:now] + self.bins * offset, height[:now],
bottom=bottom[:now], align='center',
width=width, color=color, **pargs)
if style == 'fill':
ax.plot(times[:now], duty[:now], drawstyle='steps-post',
color=ec, linewidth=lw)
# plot mean
if rollingmean:
t = [self.start] + list(times + self.bins/2.) + [self.end]
mean = [mean[0]] + list(mean) + [mean[-1]]
ax.plot(t, mean, color=sep and 'k' or color, linestyle='--')
# record duty for stacked chart
if stacked:
bottom += height
# customise plot
for key, val in self.pargs.iteritems():
for ax in axes:
try:
getattr(ax, 'set_%s' % key)(val)
except AttributeError:
setattr(ax, key, val)
if 'hours' in self.pargs.get('ylabel', ''):
ax.get_yaxis().get_major_locator().set_params(
steps=[1, 2, 4, 8, 12, 24])
if sep:
# set text
ylabel = axes[0].yaxis.get_label()
y = axes[-1].get_position().y0 + (
axes[0].get_position().y1 - axes[-1].get_position().y0)/2.
t = plot.text(0.04, y, ylabel.get_text(), rotation=90, ha='center',
va='center')
t.set_fontproperties(ylabel.get_font_properties())
for i, ax in enumerate(axes):
ax.set_ylabel('')
if i:
ax.set_title('')
if i < len(axes) - 1:
ax.set_xlabel('')
# add custom legend for mean
if rollingmean:
yoff = 0.01 * float.__div__(*axes[0].get_position().size)
lkwargs = legendargs.copy()
lkwargs.update({
'loc': 'lower right',
'bbox_to_anchor': (1.0, 1. + yoff),
'fontsize': 12,
'borderaxespad': 0,
})
leg = axes[0].legend(['Rolling mean'], **lkwargs)
if leg.get_frame().get_edgecolor() != 'none':
ax.legend_.get_frame().set_edgecolor(rcParams['grid.color'])
axes[0].add_artist(leg)
axes[0].lines[0].set_label('_')
for ax in axes:
try:
plot.add_legend(ax=ax, **legendargs)
except AttributeError:
pass
# add extra axes and finalise
if not plot.colorbars:
for ax in axes:
plot.add_colorbar(ax=ax, visible=False)
if self.state:
self.add_state_segments(axes[-1])
return self.finalize(outputfile=outputfile)
register_plot(DutyDataPlot)
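# A minimal sketch (not part of this module) of the side-by-side bar layout
# used above: each flag's bars are shifted within a time bin by an offset of
# pad + x/nflags * (i + 1/2). All names and numbers below are made up.
#
#     import numpy
#     from matplotlib import pyplot
#
#     times = numpy.arange(0, 86400, 3600.)    # assumed bin start times (s)
#     bins = numpy.ones_like(times) * 3600.    # assumed bin widths (s)
#     duty = [numpy.random.uniform(0, 100, times.size) for _ in range(3)]
#     pad = .1
#     x = 1 - pad * 2
#     fig, ax = pyplot.subplots()
#     for i, d in enumerate(duty):
#         offset = pad + x / len(duty) * (i + 1/2.)
#         ax.bar(times + bins * offset, d, width=x / len(duty) * bins,
#                align='center')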
class ODCDataPlot(SegmentLabelSvgMixin, StateVectorDataPlot):
"""Custom `StateVectorDataPlot` for ODCs with bitmasks
"""
type = 'odc'
data = 'odc'
defaults = StateVectorDataPlot.defaults.copy()
defaults.update({
'no_summary_bit': False,
'in_mask_color': (.0, .4, 1.),
'masked_off_color': 'red',
'unmasked_off_color': (1.0, 0.7, 0.0),
'legend-loc': 'upper left',
'legend-bbox_to_anchor': (1.01, 1),
'legend-borderaxespad': 0.,
'legend-fontsize': 10,
})
def __init__(self, *args, **kwargs):
bitmaskc = kwargs.pop('bitmask_channel', None)
super(ODCDataPlot, self).__init__(*args, **kwargs)
if bitmaskc:
self.bitmask = bitmaskc.split(',')
else:
self.bitmask = map(get_odc_bitmask, self.channels)
def get_bitmask_channels(self):
return type(self.channels)(list(map(get_channel, self.bitmask)))
@property
def pid(self):
try:
return self._pid
        except AttributeError:
chans = "".join(map(str, self.channels))
masks = "".join(map(str, self.get_bitmask_channels()))
self._pid = hashlib.md5(chans+masks).hexdigest()[:6]
if self.pargs.get('bits', None):
self._pid = hashlib.md5(
self._pid + str(self.pargs['bits'])).hexdigest()[:6]
return self.pid
def process(self):
# make font size smaller
_labelsize = rcParams['ytick.labelsize']
labelsize = self.pargs.pop('labelsize', 12)
rcParams['ytick.labelsize'] = labelsize
# make figure
(plot, axes) = self.init_plot(plot=SegmentPlot)
ax = axes[0]
ax.grid(False, which='both', axis='y')
# extract plotting arguments
ax.set_insetlabels(self.pargs.pop('insetlabels', True))
nosummary = self.pargs.pop('no_summary_bit', False)
activecolor = self.pargs.pop('active', GREEN)
edgecolor = self.pargs.pop('edgecolor', 'black')
maskoncolor = self.pargs.pop('masked_off_color', 'red')
maskoffcolor = self.pargs.pop('unmasked_off_color', (1.0, 0.7, 0.0))
inmaskcolor = self.pargs.pop('in_mask_color', (.0, .4, 1.))
plotargs = {'facecolor': activecolor,
'edgecolor': edgecolor,
'height': .8}
legendargs = self.parse_legend_kwargs()
# plot segments
nflags = 0
for i, (channel, bitmaskchan) in enumerate(
zip(self.channels, self.get_bitmask_channels())):
if self.state and not self.all_data:
valid = self.state.active
else:
valid = SegmentList([self.span])
# read ODC and bitmask vector
data = get_timeseries(str(channel), valid, query=False,
statevector=True)
bitmask = get_timeseries(bitmaskchan, valid, query=False,
statevector=True)
# plot bitmask
flags = {}
# plot bits
for type_, svlist in zip(['bitmask', 'data'], [bitmask, data]):
flags[type_] = None
for stateseries in svlist:
if not stateseries.size:
stateseries.epoch = self.start
stateseries.dx = 0
if channel.sample_rate is not None:
stateseries.sample_rate = channel.sample_rate
stateseries.bits = channel.bits
if not 'int' in str(stateseries.dtype):
stateseries = stateseries.astype('uint32')
newflags = stateseries.to_dqflags()
if flags[type_] is None:
flags[type_] = newflags
else:
for i, flag in newflags.iteritems():
flags[type_][i] += flag
i = 0
for i, bit in enumerate(channel.bits):
if bit is None or bit == '':
continue
try:
mask = flags['bitmask'][bit].active
except TypeError:
continue
segs = flags['data'][bit]
label = '[%s] %s' % (i, segs.name)
# plot summary bit
if segs.name == channel.bits[0] and not nosummary:
summargs = plotargs.copy()
summargs['height'] *= 3
ax.plot(segs, y=-nflags - 1, label=label,
known=maskoncolor, **summargs)
nflags += 2
# plot masks and separate masked/not masked
else:
maskon = segs.copy()
maskon.known &= mask
maskon.active &= mask
maskoff = segs.copy()
maskoff.known -= mask
maskoff.active -= mask
# plot mask
ax.plot(mask, y=-nflags, facecolor=inmaskcolor,
edgecolor='none', height=1., label=None,
collection=False, zorder=-1001)
                    # plot unmasked and masked segments separately
if maskoff:
ax.plot(maskoff, y=-nflags, label=label,
known=maskoffcolor, **plotargs)
label = None
if maskon:
ax.plot(maskon, y=-nflags, label=label,
known=maskoncolor, **plotargs)
label = '[%s] %s' % (i, segs.name)
nflags += 1
# make custom legend
epoch = ax.get_epoch()
xlim = ax.get_xlim()
seg = Segment(self.start - 10, self.start - 9)
m = ax.build_segment(seg, y=0, facecolor=inmaskcolor, edgecolor='none')
v = ax.build_segment(seg, y=0, facecolor=maskoncolor,
edgecolor=edgecolor)
x = ax.build_segment(seg, y=0, facecolor=maskoffcolor,
edgecolor=edgecolor)
a = ax.build_segment(seg, y=0, facecolor=activecolor,
edgecolor=edgecolor)
if edgecolor not in [None, 'none']:
t = ax.build_segment(seg, y=0, facecolor=edgecolor)
ax.legend([m, v, x, a, t],
['In bitmask', 'Bit masked\nand OFF',
'Bit unmasked\nand OFF', 'Bit ON',
'Transition'], **legendargs)
else:
ax.legend([m, v, x, a],
['In bitmask', 'Bit masked\nand OFF',
'Bit unmasked\nand OFF', 'Bit ON'],
**legendargs)
ax.set_epoch(epoch)
ax.set_xlim(*xlim)
# customise plot
for key, val in self.pargs.iteritems():
try:
getattr(ax, 'set_%s' % key)(val)
except AttributeError:
setattr(ax, key, val)
if 'ylim' not in self.pargs:
ax.set_ylim(-nflags+0.5, 0.5)
# add bit mask axes and finalise
if not plot.colorbars:
plot.add_colorbar(ax=ax, visible=False)
if self.state and self.state.name != ALLSTATE:
self.add_state_segments(ax)
out = self.finalize()
rcParams['ytick.labelsize'] = _labelsize
return out
register_plot(ODCDataPlot)
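# A minimal sketch (not part of this module) of the masked/unmasked split
# performed above, using plain segment arithmetic from gwpy; the times are
# made up:
#
#     from gwpy.segments import Segment, SegmentList
#
#     bit_on = SegmentList([Segment(0, 40), Segment(60, 100)])
#     in_mask = SegmentList([Segment(20, 80)])
#     masked_on = bit_on & in_mask      # bit ON while included in the mask
#     unmasked_on = bit_on - in_mask    # bit ON while excluded from the mask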
class SegmentPiePlot(PiePlot, SegmentDataPlot):
type = 'segment-pie'
defaults = {
'legend-loc': 'center left',
'legend-bbox_to_anchor': (.8, .5),
'legend-frameon': False,
'wedge-width': .55,
'wedge-edgecolor': 'white',
}
def init_plot(self, plot=Plot, geometry=(1,1)):
"""Initialise the Figure and Axes objects for this
        `SegmentPiePlot`.
"""
figsize = self.pargs.pop('figsize', [12, 6])
self.plot = Plot(figsize=figsize)
axes = [self.plot.gca()]
return self.plot, axes
def parse_wedge_kwargs(self, defaults=dict()):
wedgeargs = defaults.copy()
for key in self.pargs.keys():
if key.startswith('wedge-') or key.startswith('wedge_'):
wedgeargs[key[6:]] = self.pargs.pop(key)
return wedgeargs
def process(self, outputfile=None):
(plot, axes) = self.init_plot(plot=Plot)
ax = axes[0]
# get labels
#flags = map(lambda f: str(f).replace('_', r'\_'), self.flags)
#labels = self.pargs.pop('labels', self.pargs.pop('label', flags))
#labels = map(lambda s: re_quote.sub('', str(s).strip('\n ')), labels)
# extract plotting arguments
future = self.pargs.pop('include_future', False)
legendargs = self.parse_legend_kwargs()
wedgeargs = self.parse_wedge_kwargs()
plotargs = self.parse_plot_kwargs()
# use state to generate suptitle with GPS span
if self.state:
self.pargs.setdefault(
'suptitle',
'[%s-%s, state: %s]' % (self.span[0], self.span[1],
label_to_latex(str(self.state))))
else:
self.pargs.setdefault(
'suptitle', '[%s-%s]' % (self.span[0], self.span[1]))
# get segments
data = []
for flag in self.flags:
if self.state and not self.all_data:
valid = self.state.active
else:
valid = SegmentList([self.span])
segs = get_segments(flag, validity=valid, query=False,
padding=self.padding).coalesce()
data.append(float(abs(segs.active)))
if future:
total = sum(data)
alltime = abs(self.span)
data.append(alltime-total)
if 'labels' in plotargs:
plotargs['labels'] = list(plotargs['labels']) + [' ']
if 'colors' in plotargs:
plotargs['colors'] = list(plotargs['colors']) + ['white']
# make pie
labels = plotargs.pop('labels')
patches = ax.pie(data, **plotargs)[0]
ax.axis('equal')
# set wedge params
for wedge in patches:
for key, val in wedgeargs.iteritems():
getattr(wedge, 'set_%s' % key)(val)
# make legend
legendargs['title'] = self.pargs.pop('title', None)
legth = legendargs.pop('threshold', 0)
legsort = legendargs.pop('sorted', False)
tot = float(sum(data))
pclabels = []
for d, label in zip(data, labels):
if not label or label == ' ':
pclabels.append(label)
else:
try:
pc = d/tot * 100
except ZeroDivisionError:
pc = 0.0
pclabels.append(label_to_latex(
'%s [%1.1f%%]' % (label, pc)).replace(r'\\', '\\'))
# add time to top
suptitle = self.pargs.pop('suptitle', None)
if suptitle:
extra = Rectangle((0,0), 1, 1, fc='w', fill=False, ec='none',
linewidth=0)
# sort entries
if legsort:
patches, pclabels, data = map(list, zip(*sorted(
zip(patches, pclabels, data),
key=lambda x: x[2],
reverse=True)))
# and restrict to the given threshold
if legth:
patches, pclabels, data = map(list, zip(*[
x for x in zip(patches, pclabels, data) if x[2] >= legth]))
if suptitle:
leg = ax.legend([extra]+patches, [suptitle]+pclabels, **legendargs)
t = leg.get_texts()[0]
t.set_fontproperties(t.get_fontproperties().copy())
t.set_size(min(12, t.get_size()))
else:
leg = ax.legend(patches, pclabels, **legendargs)
legt = leg.get_title()
legt.set_fontsize(max(22, legendargs.get('fontsize', 22)+4))
legt.set_ha('left')
# customise plot
for key, val in self.pargs.iteritems():
try:
getattr(ax, 'set_%s' % key)(val)
except AttributeError:
setattr(ax, key, val)
# copy title and move axes
if ax.get_title():
title = plot.suptitle(ax.get_title())
title.update_from(ax.title)
title.set_y(title._y + 0.05)
ax.set_title('')
axpos = ax.get_position()
offset = -.2
ax.set_position([axpos.x0+offset, .1, axpos.width, .8])
# add bit mask axes and finalise
self.pargs['xlim'] = None
return self.finalize(outputfile=outputfile, transparent="True",
pad_inches=0)
register_plot(SegmentPiePlot)
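# A minimal sketch (not part of this module) of the underlying matplotlib
# calls: a pie whose wedge properties are set after plotting and whose legend
# labels carry the percentage of total livetime. Data and labels are made up.
#
#     from matplotlib import pyplot
#
#     data = [3600., 1800., 600.]                # assumed livetimes (s)
#     labels = ['Observing', 'Locked', 'Down']   # hypothetical flag names
#     fig, ax = pyplot.subplots()
#     patches = ax.pie(data)[0]
#     ax.axis('equal')
#     for wedge in patches:
#         wedge.set_width(.55)                   # cf. the 'wedge-width' default
#         wedge.set_edgecolor('white')
#     total = float(sum(data))
#     pclabels = ['%s [%.1f%%]' % (l, d / total * 100)
#                 for l, d in zip(labels, data)]
#     ax.legend(patches, pclabels, loc='center left', bbox_to_anchor=(.8, .5))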
class NetworkDutyPiePlot(SegmentPiePlot):
"""Special case of the `SegmentPiePlot` for network duty factors
"""
type = 'network-duty-pie'
NETWORK_NAME = {
0: 'no',
1: 'single',
2: 'double',
3: 'triple',
4: 'quadruple',
5: 'quintuple',
6: 'sextuple',
}
NETWORK_COLOR = {
'H1': 'red',
'L1': (0.2, 0.8, 0.2),
'V1': (0.5, 0., 0.75),
'G1': 'gray',
'no': 'black',
'single': (1.0, 0.7, 0.0),
'double': (0.0, 0.4, 1.0),
'triple': 'pink',
'quadruple': (1.0, 0.4, 0.0),
}
defaults = SegmentPiePlot.defaults.copy()
defaults.update({
'legend-fontsize': 24,
})
def process(self):
# get segments
if self.state and not self.all_data:
valid = self.state.active
else:
valid = SegmentList([self.span])
# construct compound flags for each network size
flags = dict((f[:2],f) for f in self.flags)
network = ''.join(sorted(set(flags.keys())))
self.pargs.setdefault('title', '%s network duty factor' % network)
networkflags = []
colors = []
labels = []
exclude = DataQualityFlag()
for i in list(range(len(flags)+1))[::-1]:
name = self.NETWORK_NAME[i]
flag = '%s:%s' % (network, name)
networksegs = DataQualityFlag(flag, known=valid)
for ifoset in itertools.combinations(flags, i):
if not ifoset:
compound = '!%s' % '!'.join(flags.values())
else:
compound = '&'.join(flags[ifo] for ifo in ifoset)
segs = get_segments(compound, validity=valid, query=False,
padding=self.padding).coalesce()
networksegs += segs
globalv.SEGMENTS[flag] = networksegs - exclude
exclude = networksegs
networkflags.append(flag)
labels.append('%s interferometer' % name.title())
colors.append(self.NETWORK_COLOR.get(name))
self.pargs.setdefault('colors', colors)
self.pargs.setdefault('labels', labels)
# reset flags and generate plot
flags_ = self.flags
outputfile = self.outputfile
self.flags = networkflags
out = super(NetworkDutyPiePlot, self).process(outputfile=outputfile)
self.flags = flags_
return out
register_plot(NetworkDutyPiePlot)
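# A toy sketch (not part of this module) of the exclusive-coincidence logic
# above, using sets of integer seconds instead of DataQualityFlags; all times
# are made up:
#
#     from itertools import combinations
#
#     obs = {'H1': set(range(0, 60)),
#            'L1': set(range(30, 90)),
#            'V1': set(range(50, 70))}
#     span = set(range(0, 90))
#     exclude = set()
#     exclusive = {}
#     for n in range(len(obs), -1, -1):
#         combined = set()
#         for ifos in combinations(obs, n):
#             if ifos:
#                 combined |= set.intersection(*(obs[i] for i in ifos))
#             else:
#                 combined |= span - set.union(*obs.values())
#         exclusive[n] = combined - exclude   # exactly n detectors observing
#         exclude |= combined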
class SegmentBarPlot(BarPlot, SegmentDataPlot):
type = 'segment-bar'
defaults = {
'edgecolor': 'white',
'scale': 'percent',
'color': GREEN,
'edgecolor': 'green',
'alpha': .6,
}
SCALE_UNIT = {
None: 'seconds',
1: 'seconds',
'percent': r'\%',
60: 'minutes',
3600: 'hours',
}
def init_plot(self, plot=Plot, geometry=(1,1)):
"""Initialise the Figure and Axes objects for this
        `SegmentBarPlot`.
"""
figsize = self.pargs.pop('figsize', [12, 6])
self.plot = Plot(figsize=figsize)
axes = [self.plot.gca()]
return self.plot, axes
def process(self, outputfile=None):
(plot, axes) = self.init_plot(plot=Plot)
ax = axes[0]
if self.state:
self.pargs.setdefault(
'suptitle',
'[%s-%s, state: %s]' % (self.span[0], self.span[1],
label_to_latex(str(self.state))))
else:
self.pargs.setdefault(
'suptitle', '[%s-%s]' % (self.span[0], self.span[1]))
suptitle = self.pargs.pop('suptitle', None)
if suptitle:
plot.suptitle(suptitle, y=0.993, va='top')
scale = self.pargs.pop('scale', 'percent')
if scale == 'percent':
self.pargs.setdefault('ylim', (0, 100))
elif isinstance(scale, (int, float)):
self.pargs.setdefault('ylim', (0, abs(self.span) / scale))
try:
self.pargs.setdefault('ylabel', 'Livetime [%s]'
% self.SCALE_UNIT[scale])
except KeyError:
self.pargs.setdefault('ylabel', 'Livetime')
# extract plotting arguments
sort = self.pargs.pop('sorted', False)
plotargs = self.parse_plot_kwargs()
# get segments
data = []
labels = plotargs.pop('labels', self.flags)
for flag in self.flags:
if self.state and not self.all_data:
valid = self.state.active
else:
valid = SegmentList([self.span])
segs = get_segments(flag, validity=valid, query=False,
padding=self.padding).coalesce()
livetime = float(abs(segs.active))
if scale == 'percent':
data.append(100 * livetime / float(abs(segs.known)))
elif isinstance(scale, (float, int)):
data.append(livetime / scale)
if sort:
data, labels = zip(*sorted(
zip(data, labels), key=lambda x: x[0], reverse=True))
# make bar chart
width = plotargs.pop('width', .8)
x = numpy.arange(len(data)) - width/2.
patches = ax.bar(x, data, width=width, **plotargs)[0]
# set labels
ax.set_xticks(range(len(data)))
ax.set_xticklabels(labels, rotation=30,
rotation_mode='anchor', ha='right', fontsize=13)
ax.tick_params(axis='x', pad=2)
ax.xaxis.labelpad = 2
ax.xaxis.grid(False)
self.pargs.setdefault('xlim', (-.5, len(data)-.5))
# customise plot
for key, val in self.pargs.iteritems():
try:
getattr(ax, 'set_%s' % key)(val)
except AttributeError:
setattr(ax, key, val)
# add bit mask axes and finalise
self.pargs['xlim'] = None
return self.finalize(outputfile=outputfile, transparent="True",
pad_inches=0)
register_plot(SegmentBarPlot)
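# A minimal sketch (not part of this module) of the 'scale' handling above,
# for a single hypothetical flag:
#
#     active = 5400.   # seconds the flag was active
#     known = 7200.    # seconds the flag was known
#     percent = 100 * active / known    # scale = 'percent' -> 75.0
#     hours = active / 3600.            # scale = 3600      -> 1.5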
class SegmentHistogramPlot(get_plot('histogram'), SegmentDataPlot):
"""Histogram of segment duration
"""
type = 'segment-histogram'
data = 'segments'
defaults = {'ylabel': 'Number of segments',
'log': False,
'histtype': 'stepfilled',
'bottom': 0,
'rwidth': 1}
def process(self, outputfile=None):
# make axes
(plot, axes) = self.init_plot()
# use state to generate suptitle with GPS span
if self.state:
self.pargs.setdefault(
'suptitle',
'[%s-%s, state: %s]' % (self.span[0], self.span[1],
label_to_latex(str(self.state))))
else:
self.pargs.setdefault(
'suptitle', '[%s-%s]' % (self.span[0], self.span[1]))
suptitle = self.pargs.pop('suptitle', None)
if suptitle:
plot.suptitle(suptitle, y=0.993, va='top')
# extract plotting arguments
histargs = self.parse_plot_kwargs()
# get segments
data = []
for flag in self.flags:
if self.state and not self.all_data:
valid = self.state.active
else:
valid = SegmentList([self.span])
segs = get_segments(flag, validity=valid, query=False,
padding=self.padding).coalesce()
livetime = float(abs(segs.active))
data.append(map(lambda x: float(abs(x)), segs.active))
# get range
        if 'range' not in histargs[0]:
l = axes[0].common_limits(data)
for d in histargs:
d['range'] = l
# plot
for ax, arr, pargs in zip(cycle(axes), data, histargs):
if len(arr) == 0:
kwargs = dict(
(k, pargs[k]) for k in ['label', 'color'] if pargs.get(k))
ax.plot([], **kwargs)
else:
if pargs.get('normed', False) in ['N', 'num', 'number']:
pargs['normed'] = False
                    pargs.setdefault('weights', [1. / len(arr)] * len(arr))
ax.hist(arr, **pargs)
# customise plot
legendargs = self.parse_legend_kwargs()
for i, ax in enumerate(axes):
for key, val in self.pargs.iteritems():
if key == 'title' and i > 0:
continue
if key == 'xlabel' and i < (len(axes) - 1):
continue
if key == 'ylabel' and (
(len(axes) % 2 and i != len(axes) // 2) or
(len(axes) % 2 == 0 and i > 0)):
continue
try:
getattr(ax, 'set_%s' % key)(val)
except AttributeError:
setattr(ax, key, val)
if len(self.flags) > 1:
plot.add_legend(ax=ax, **legendargs)
if len(axes) % 2 == 0 and axes[0].get_ylabel():
label = axes[0].yaxis.label
ax = axes[int(len(axes) // 2)-1]
ax.set_ylabel(label.get_text())
ax.yaxis.label.set_position((0, -.2 / len(axes)))
if len(axes) != 2:
label.set_text('')
# set common ylim
if 'ylim' not in self.pargs:
y0 = min([ax.get_ylim()[0] for ax in axes])
y1 = max([ax.get_ylim()[1] for ax in axes])
for ax in axes:
ax.set_ylim(y0, y1)
# add bit mask axes and finalise
return self.finalize(outputfile=outputfile, transparent="True",
pad_inches=0)
register_plot(SegmentHistogramPlot)
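# A minimal sketch (not part of this module) of normalising a histogram by
# the number of entries, as in the normed in ('N', 'num', 'number') branch
# above: each entry is weighted by 1/N so that the bars sum to one. The data
# below are made up.
#
#     import numpy
#     from matplotlib import pyplot
#
#     durations = numpy.random.exponential(64., 500)
#     weights = numpy.ones_like(durations) / durations.size
#     fig, ax = pyplot.subplots()
#     ax.hist(durations, bins=50, weights=weights)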
|
gpl-3.0
|
Mazecreator/tensorflow
|
tensorflow/contrib/timeseries/examples/known_anomaly.py
|
53
|
6786
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def train_and_evaluate_exogenous(csv_file_name=_DATA_FILE, train_steps=300):
"""Training, evaluating, and predicting on a series with changepoints."""
# Indicate the format of our exogenous feature, in this case a string
# representing a boolean value.
string_feature = tf.contrib.layers.sparse_column_with_keys(
column_name="is_changepoint", keys=["no", "yes"])
# Specify the way this feature is presented to the model, here using a one-hot
# encoding.
one_hot_feature = tf.contrib.layers.one_hot_column(
sparse_id_column=string_feature)
estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=12,
# Extract a smooth period by constraining the number of latent values
# being cycled between.
cycle_num_latent_values=3,
num_features=1,
exogenous_feature_columns=[one_hot_feature],
# Make exogenous updates sparse by setting an update condition. This in
# effect allows missing exogenous features: if the condition evaluates to
# False, no update is performed. Otherwise we sometimes end up with
# "leaky" updates which add unnecessary uncertainty to the model even when
# there is no changepoint.
exogenous_update_condition=
lambda times, features: tf.equal(features["is_changepoint"], "yes"))
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
# Indicate the format of our CSV file. First we have two standard columns,
# one for times and one for values. The third column is a custom exogenous
# feature indicating whether each timestep is a changepoint. The
# changepoint feature name must match the string_feature column name
# above.
column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
tf.contrib.timeseries.TrainEvalFeatures.VALUES,
"is_changepoint"),
# Indicate dtypes for our features.
column_dtypes=(tf.int64, tf.float32, tf.string),
# This CSV has a header line; here we just ignore it.
skip_header_lines=1)
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
# Use truncated backpropagation with a window size of 64, batching
# together 4 of these windows (random offsets) per training step. Training
# with exogenous features often requires somewhat larger windows.
reader, batch_size=4, window_size=64)
estimator.train(input_fn=train_input_fn, steps=train_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Create an input_fn for prediction, with a simulated changepoint. Since all
# of the anomalies in the training data are explained by the exogenous
# feature, we should get relatively confident predictions before the indicated
# changepoint (since we are telling the model that no changepoint exists at
# those times) and relatively uncertain predictions after.
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features={
"is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
# Indicate the locations of the changepoints for plotting vertical lines.
anomaly_locations = []
with open(csv_file_name, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row["is_changepoint"] == "yes":
anomaly_locations.append(int(row["time"]))
anomaly_locations.append(predictions["times"][49])
return (times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations)
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit, anomaly_locations):
"""Plot the time series and anomalies in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
for anomaly_location in anomaly_locations[1:]:
pyplot.axvline(anomaly_location, linestyle="dotted")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Ignoring a known anomaly", *train_and_evaluate_exogenous())
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
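# A sketch (not part of this example) of the CSV layout assumed by the
# CSVReader above: a header line, then one row per timestep with the time,
# the observed value, and the "is_changepoint" marker read back by
# csv.DictReader. The value-column name and the numbers are made up.
#
#     time,value,is_changepoint
#     0,0.12,no
#     1,0.31,no
#     2,5.70,yes
#     3,5.64,no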
|
apache-2.0
|
wanggang3333/scikit-learn
|
sklearn/linear_model/omp.py
|
127
|
30417
|
"""Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
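# A minimal usage sketch (not part of this module): recovering a sparse code
# with ``orthogonal_mp`` on a random dictionary with unit-norm columns. The
# sizes and indices below are arbitrary.
#
#     import numpy as np
#     from sklearn.linear_model import orthogonal_mp
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 100)
#     X /= np.sqrt(np.sum(X ** 2, axis=0))       # unit-norm columns
#     gamma = np.zeros(100)
#     gamma[[2, 17, 40]] = [1., -2., 3.]
#     y = np.dot(X, gamma)
#     coef = orthogonal_mp(X, y, n_nonzero_coefs=3)
#     # np.flatnonzero(coef) should recover the indices [2, 17, 40]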
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
        Squared L2 norms of the rows of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
        Maximum number of iterations to perform, and therefore the maximum
        number of features to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
        Maximum number of iterations to perform, and therefore the maximum
        number of features to include. 10% of ``n_features`` but at least 5
        if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
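# A minimal usage sketch (not part of this module): selecting the sparsity
# level by cross-validation with ``OrthogonalMatchingPursuitCV``. The data
# below are synthetic.
#
#     import numpy as np
#     from sklearn.linear_model import OrthogonalMatchingPursuitCV
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(200, 30)
#     w = np.zeros(30)
#     w[[1, 5, 9]] = [2., -1., 3.]
#     y = np.dot(X, w) + 0.01 * rng.randn(200)
#     model = OrthogonalMatchingPursuitCV(cv=5).fit(X, y)
#     # model.n_nonzero_coefs_ holds the selected number of atoms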
|
bsd-3-clause
|
btabibian/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
110
|
3768
|
# Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
|
bsd-3-clause
|
zhenv5/scikit-learn
|
sklearn/neighbors/nearest_centroid.py
|
199
|
7249
|
# -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
    metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
        The centroid for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in csc
        # format, which makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
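# --- Illustrative sketch (not part of scikit-learn) ---
# A minimal, made-up example of the soft-thresholding step applied in
# NearestCentroid.fit when shrink_threshold is set: deviations are reduced in
# absolute value by the threshold, clipped at zero, and keep their original sign.
def _example_soft_thresholding():
    deviation = np.array([[1.5, -0.2], [-0.9, 0.05]])  # made-up deviations
    shrink_threshold = 0.3
    signs = np.sign(deviation)
    shrunk = np.abs(deviation) - shrink_threshold
    shrunk[shrunk < 0] = 0  # components below the threshold are zeroed
    return shrunk * signs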
|
bsd-3-clause
|
iprafols/cross_correlations
|
lib/correlation_process.py
|
1
|
132623
|
"""
Module to process the outputs of the cross-correlation code
DESCRIPTION:
This module provides the classes CorrData, CorrModel, CorrDataError,
CorrDataWarning, CorrModelError, and CorrelationProcessError
to enable easy treatment of the outputs of the cross-correlation code. The first
two classes are for data and model treatment and the others are for handling errors
and warnings.
See the specific documentation of each of the classes for details.
The module provides a function, showDocumentation, to display the documentation of the
different classes, methods, and functions. Type
showDocumentation(showDocumentation) for details on its behaviour.
CLASSES:
CorrData
CorrDataWarning
CorrDataError
CorrModel
CorrModelError
CorrelationProcessError
CorrelationProcessWarning
FUNCTIONS:
plot
rebinIgnoringCovMat
showDocumentation
trim
TO DO:
Add test capabilities
"""
import warnings
import inspect
import sys
import os
import difflib
import numpy as np
import matplotlib
try:
import matplotlib.pyplot as plt
except RuntimeError:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib.colors import colorConverter
import matplotlib.gridspec as gridspec
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, NullFormatter, ScalarFormatter
__author__ = 'Ignasi Perez-Rafols'
__copyright__ = 'CC by-nc-sa'
__credits__ = ['Ignasi Perez-Rafols']
__license__ = 'GPL'
__version__ = '1.0.1'
__maintainer__ = 'Ignasi Perez-Rafols'
__email__ = '[email protected]'
__status__ = 'Production'
class CorrData(object):
"""
Manage the data generated by the cross-correlation code.
CLASS: CorrData
PURPOSE:
Manage the data generated by the cross-correlation code
PUBLIC METHODS:
__init__(filename, pi, sigma)
computeTransformationMatrix(new_pi, new_sigma, keep_matrix=False)
rebinData(transformation_matrix, keep_results=False, rebin_grid=True)
whichBins(sigma_min, sigma_max, rebinned=False)
cov_mat(rebinned=False)
error(rebinned=False)
data_mat(rebinned=False)
grid_pi_mat(rebinned=False)
grid_sigma_mat(rebinned=False)
has_grid()
pi(rebinned=False)
sigma(rebinned=False)
PRIVATE METHODS (should not be called except by class methods):
_formatData(self)
PRIVATE VARIABLES (should not be accessed except by class methods):
_data (structured np.ndarray): Contains the measurement of the cross-correlation for the
different bins
_data_mat (np.ndarray): Matrix form of _data
_cov (structured np.ndarray): Contains the measurement of the covariance matrix for the
different bins
_cov_mat (np.ndarray): Matrix form of _cov
_grid (structured np.ndarray): Contains the grid layout for the different bins. Ignored if
_has_grid is False
_grid_pi_mat (np.ndarray): Matrix form of the values of parallel separation from _grid.
If _has_grid is False, the values are computed from _pi and
_sigma
_grid_sigma_mat (np.ndarray): Matrix form of the values of perpendicular separation from
_grid. If _has_grid is False, the values are computed from
_pi and _sigma
_grid_z_mat (np.ndarray): Matrix form of the values of the redshift from _grid. Ignored
if _has_grid is False
_dist (structured np.ndarray): Contains the measurement of the distortion matrix. Ignored
if _has_dist is False
_dist_mat (np.ndarray): Matrix form of _dist. Ignored if _has_dist is False
_has_grid (boolean): True if the grid layout is specified, False otherwise
_has_dist (boolean): True if the distortion matrix is specified, False otherwise
_pi (np.ndarray): An array specifying the limits of the parallel separation
bins
_sigma (np.ndarray): An array specifying the limits of the perpendicular
separation bins
_transformatiom_matrix (np.ndarray): An array containing the conversion to the last specified
binning
_pi_new (np.ndarray): An array containing the rebinned bins in pi
_sigma_new (np.ndarray): An array containing the rebinned bins in sigma
_cov_mat_new (np.ndarray): An array containing the rebinned version of _cov_mat
_data_mat_new (np.ndarray): An array containing the rebinned version of _data_mat
_grid_pi_mat_new (np.ndarray): An array containing the rebinned version of _grid_pi_mat.
_grid_sigma_mat_new (np.ndarray): An array containing the rebinned version of _grid_sigma_mat.
_grid_z_mat_new (np.ndarray): An array containing the rebinned version of _grid_z_mat.
Ignored if _has_grid is False
ASSOCIATED WARNING AND ERROR CLASSES:
CorrDataWarning
CorrDataError
"""
def __init__(self, filename, pi, sigma):
"""
Initialize class instance.
FUNCTION: CorrData.__init__
TYPE: Constructor, public
PURPOSE:
Initialize class instance
ARGUMENTS:
filename (string): Name of the file containing the data to process (without the extension)
pi (np.ndarray): Intervals of parallel separation at which the data was measured
sigma (np.ndarray): Intervals of perpendicular separation at which the data was measured
RETURNS:
An initialized instance of CorrData
EXCEPTION SAFETY:
Raises a CorrDataError instance if the arguments are of incorrect type, data is not readable
or data shapes are not consistent. Prints a CorrDataWarning if the specified bin ranges are
not consistent with the read data. The instance is still formed after the warning, but some
features may produce unexpected results
EXAMPLES:
data = CorrData("my_measurement", np.arange(-80, 80 + 2, 2), np.arange(0, 80 + 2, 2))
"""
# check parameters' types
if not (type(filename) == str):
raise CorrDataError(self.__init__, 'Incorrect type of the parameter "filename". Could not generate instance')
if not (isinstance(pi, np.ndarray)):
raise CorrDataError(self.__init__, 'Incorrect type of the parameter "pi". Could not generate instance')
if not (isinstance(sigma, np.ndarray)):
raise CorrDataError(self.__init__, 'Incorrect type of the parameter "sigma". Could not generate instance')
# read data
try:
self._data = np.genfromtxt(filename + '.data', dtype=[('index', int), ('value', float)])
self._cov = np.genfromtxt(filename + '.cov', dtype=[('index1', int), ('index2', int), ('value', float)])
except IOError as ioerror:
raise CorrDataError(self.__init__, str(ioerror).split(']')[-1])
# read grid
try:
self._grid = np.genfromtxt(filename + '.grid', dtype=[('index', int),
('pi', float),
('sigma', float),
('z', float)])
self._has_grid = True
# if there is no grid, just assume the middle values of the specified intervals
except IOError:
self._grid = np.array([ (i * (sigma.size - 1) + j, (pi[i] + pi[i + 1]) / 2.0, (sigma[j] + sigma[j + 1]) / 2.0) for i in range(0, pi.size - 1) for j in range(0, sigma.size - 1) ], dtype=[('index', int), ('pi', float), ('sigma', float)])
self._has_grid = False
# read distortion matrix
try:
self._dist = np.genfromtxt(filename + '.dmat', dtype=[('index1', int), ('index2', int), ('value', float)])
self._has_dist = True
except IOError:
self._has_dist = False
# format data into matrixes
self._formatData()
# check that matrix shapes are consistent
if not (self._cov_mat.shape[0] == self._data_mat.size):
raise CorrDataError(self.__init__, 'Read cov_mat and data have shapes that are not consistent. cov_mat.shape = {}, data.shape = {}'.format(self._cov_mat.shape, self._data_mat.shape))
if not (self._grid_pi_mat.size == self._data_mat.size):
raise CorrDataError(self.__init__, 'Read grid_pi_mat and data have shapes that are not consistent. grid_pi_mat.shape = {}, data.shape = {}'.format(self._grid_pi_mat.shape, self._data_mat.shape))
if not (self._grid_sigma_mat.size == self._data_mat.size):
raise CorrDataError(self.__init__, 'Read grid_sigma_mat and data have shapes that are not consistent. grid_sigma_mat.shape = {}, data.shape = {}'.format(self._grid_sigma_mat.shape, self._data_mat.shape))
if self._has_grid:
if not (self._grid_z_mat.size == self._data_mat.size):
raise CorrDataError(self.__init__, 'Read grid_z_mat and data have shapes that are not consistent. grid_z_mat.shape = {}, data.shape = {}'.format(self._grid_z_mat.shape, self._data_mat.shape))
if self._has_dist:
if not (self._dist_mat.shape[0] == self._data_mat.size):
raise CorrDataError(self.__init__, 'Read dist_mat and data have shapes that are not consistent. dist_mat.shape = {}, data.shape = {}'.format(self._dist_mat.shape, self._data_mat.shape))
# save intervals for later usage
self._pi = np.copy(pi)
self._sigma = np.copy(sigma)
try:
assert (self._pi.size - 1) * (self._sigma.size - 1) == self._data_mat.size
except AssertionError:
warnings.warn(CorrDataWarning(self.__init__, 'The specified intervals are ill-formed. This may lead to unexpected behaviour'))
def _formatData(self):
"""
Formats current data into operable matrixes.
FUNCTION: CorrData._formatData
TYPE: Private (to be accessed from class member functions only)
PURPOSE:
Formats current data into operable matrixes. Formatted matrixes are stored as new
instance members and are not linked with the original arrays.
EXAMPLES:
self._formatData()
"""
self._data_mat = np.copy(self._data['value'])
self._cov_mat = np.zeros((np.amax([self._cov['index1'], self._cov['index2']]) + 1, np.amax([self._cov['index1'], self._cov['index2']]) + 1))
for index1, index2, value in self._cov:
self._cov_mat[index1][index2] = value
self._cov_mat[index2][index1] = value
self._grid_pi_mat = np.copy(self._grid['pi'])
self._grid_sigma_mat = np.copy(self._grid['sigma'])
if self._has_grid:
self._grid_z_mat = np.copy(self._grid['z'])
if self._has_dist:
self._dist_mat = np.zeros((np.amax([self._dist['index1'], self._dist['index2']]) + 1, np.amax([self._dist['index1'], self._dist['index2']]) + 1))
for index1, index2, value in self._dist:
self._dist_mat[index1][index2] = value
self._dist_mat[index2][index1] = value
def computeTransformationMatrix(self, new_pi, new_sigma, keep_matrix = False):
"""
Compute the transformation matrix from the data bins to the given bins.
FUNCTION: CorrData.computeTransformationMatrix
TYPE: Public
PURPOSE:
Compute the transformation matrix from the data bins to the specified bins. The bins must be
wider than the original bins. Depending on the options, it may store the resulting matrix as an instance
attribute. Previous similar instances are deleted.
ARGUMENTS:
new_pi (np.ndarray): An array specifying the limits of the parallel separation bins
new_sigma (np.ndarray): An array specifying the limits of the perpendicular separation bins
KEYWORD ARGUMENTS:
keep_matrix (boolean): If True, keeps the resulting transformation matrix and the new binning
as private instance attributes -- Default: False
RETURNS:
The transformation matrix between the data bins to the specified bins
EXCEPTION SAFETY:
Raises a CorrDataError instance if the parameters don't have the correct type or if the
specified bins are smaller than the originals.
EXAMPLES:
data.computeTransformationMatrix(np.arange(-80, 80 + 4, 4), np.arange(0, 80+4, 4))
data.computeTransformationMatrix(np.arange(-80, 80 + 4, 4), np.arange(0, 80+4, 4), keep_matrix=True)
data.computeTransformationMatrix(np.arange(-80, 80 + 4, 4), np.arange(0, 80+4, 4), keep_matrix=False)
"""
# check parameters' types
if not (isinstance(new_pi, np.ndarray)):
raise CorrDataError(self.computeTransformationMatrix, 'Incorrect type of the parameter "new_pi".')
if not (isinstance(new_sigma, np.ndarray)):
raise CorrDataError(self.computeTransformationMatrix, 'Incorrect type of the parameter "new_sigma".')
if not (type(keep_matrix) == bool):
raise CorrDataError(self.computeTransformationMatrix, 'Incorrect type of the parameter "keep_matrix".')
# check parameters' consistency
try:
assert new_pi.size <= self._pi.size
assert new_sigma.size <= self._sigma.size
except AssertionError:
raise CorrDataError(self.computeTransformationMatrix, 'Given bins are smaller than the originals.')
# delete previous transformation instances if existent
if hasattr(self, '_transformatiom_matrix'):
del self._transformatiom_matrix
if hasattr(self, '_pi_new'):
del self._pi_new
if hasattr(self, '_sigma_new'):
del self._sigma_new
transformation_matrix = np.zeros((self._data_mat.size, (new_pi.size - 1) * (new_sigma.size - 1)))
# compute the relation between the old and new bins in parallel separation
k_pi_conv = {}
for k_pi in range(0, self._pi.size - 1):
k_pi_new = 0
try:
aux = (self._pi[k_pi] + self._pi[k_pi + 1]) / 2.0
while not (new_pi[k_pi_new] <= aux and aux <= new_pi[k_pi_new + 1]):
k_pi_new += 1
assert k_pi_new < new_pi.size - 1
except AssertionError:
continue
k_pi_conv[k_pi] = k_pi_new
# compute the relation between the old and new bins in perpendicular separation
k_sigma_conv = {}
for k_sigma in range(0, self._sigma.size - 1):
k_sigma_new = 0
try:
aux = (self._sigma[k_sigma] + self._sigma[k_sigma + 1]) / 2.0
while not (new_sigma[k_sigma_new] <= aux and aux <= new_sigma[k_sigma_new + 1]):
k_sigma_new += 1
assert k_sigma_new < new_sigma.size - 1
except AssertionError:
continue
k_sigma_conv[k_sigma] = k_sigma_new
# compute the overall relation between old and new bins, then fill the transformation matrix
k_conv = {old_k_pi * (self._sigma.size - 1) + old_k_sigma:new_k_pi * (new_sigma.size - 1) + new_k_sigma for old_k_pi, new_k_pi in k_pi_conv.items() for old_k_sigma, new_k_sigma in k_sigma_conv.items()}
for old, new in k_conv.items():
transformation_matrix[old, new] = 1.0
# keep transformation instances
if keep_matrix:
self._transformatiom_matrix = np.copy(transformation_matrix)
self._pi_new = np.copy(new_pi)
self._sigma_new = np.copy(new_sigma)
return transformation_matrix
def rebinData(self, transformation_matrix, keep_results = False, rebin_grid = True):
"""
Rebins the data to the specified binning
FUNCTION: CorrData.rebinData
TYPE: Public
PURPOSE:
Rebins the data to the specified binning.
The rebinning is performed as follows
The covariance matrix is rebinned as
C_{new}^{-1} = S^{t} C^{-1} S,
where S is the specified transformation matrix
The data (and possibly the grid) is rebinned as
data_{new} = C_{new} S^{t} C^{-1} data
The grid is only rebinned if rebin_grid is True. If, at the same time, _has_grid is
False, then the method returns empty matrixes for the rebinned grid. Depending on the options, the
method may store the resulting matrixes as instance attributes. Previous similar instances
are deleted.
ARGUMENTS:
transformation_matrix (np.ndarray): The matrix that will be used in the transformation
KEYWORD ARGUMENTS:
keep_results (boolean): If True, keeps the resulting rebinned matrixes as
private instance attributes -- Default: False
rebin_grid (boolean): If True, rebins the grid as well. In case _has_grid
is False, returns empty matrixes for the rebinned grid
-- Default: True
RETURNS:
The rebinned matrixes. The order is cov_mat_new, data_mat_new if rebin_grid is False and
cov_mat_new, data_mat_new, grid_pi_mat_new, grid_sigma_mat_new, grid_z_mat_new otherwise
EXCEPTION SAFETY:
Raises a CorrDataError instance if the parameters don't have the correct type or if the specified
bins are smaller than the originals.
EXAMPLES:
cov_mat_new, data_mat_new, = data.rebinData(transf_matrix)
cov_mat_new, data_mat_new, = data.rebinData(transf_matrix, keep_results=True)
cov_mat_new, data_mat_new, = data.rebinData(transf_matrix, keep_results=False)
cov_mat_new, data_mat_new, grid_pi_mat_new,
grid_sigma_mat_new, grid_z_mat_new = data.rebinData(transf_matrix)
cov_mat_new, data_mat_new, grid_pi_mat_new,
grid_sigma_mat_new, grid_z_mat_new = data.rebinData(transf_matrix, keep_results=True)
cov_mat_new, data_mat_new, grid_pi_mat_new,
grid_sigma_mat_new, grid_z_mat_new = data.rebinData(transf_matrix, keep_results=False)
"""
# check parameters' types
if not (isinstance(transformation_matrix, np.ndarray)):
raise CorrDataError(self.rebinData, 'Incorrect type of the parameter "transformation_matrix".')
if not (type(keep_results) == bool):
raise CorrDataError(self.rebinData, 'Incorrect type of the parameter "keep_results".')
if not (type(rebin_grid) == bool):
raise CorrDataError(self.rebinData, 'Incorrect type of the parameter "rebin_grid".')
# check parameters' consistency
try:
assert transformation_matrix.shape[0] == self._data_mat.size
except AssertionError:
raise CorrDataError(self.rebinData, 'Incorrect shape of transformation_matrix. Expected ({},), found {}'.format(self._data_mat.size, transformation_matrix.shape))
# delete previous rebinned instances if present
if hasattr(self, '_cov_mat_new'):
del self._cov_mat_new
if hasattr(self, '_data_mat_new'):
del self._data_mat_new
if hasattr(self, '_grid_pi_mat_new'):
del self._grid_pi_mat_new
if hasattr(self, '_grid_sigma_mat_new'):
del self._grid_sigma_mat_new
if hasattr(self, '_grid_z_mat_new'):
del self._grid_z_mat_new
# first, rebin the covariance matrix
inv_cov_mat = np.linalg.inv(self._cov_mat)
inv_cov_mat_new = np.dot(transformation_matrix.transpose(), np.dot(inv_cov_mat, transformation_matrix))
cov_mat_new = np.linalg.inv(inv_cov_mat_new)
# then rebin data and grid
data_mat_new = np.dot(cov_mat_new, np.dot(transformation_matrix.transpose(), np.dot(inv_cov_mat, self._data_mat)))
if rebin_grid:
# rebin grid omitting covariance matrix
inv_id_cov_mat = np.identity(self._cov_mat.shape[0], dtype=float)
inv_id_cov_mat_new = np.dot(transformation_matrix.transpose(), np.dot(inv_id_cov_mat, transformation_matrix))
id_cov_mat_new = np.linalg.inv(inv_id_cov_mat_new)
grid_pi_mat_new = np.dot(id_cov_mat_new, np.dot(transformation_matrix.transpose(), self._grid_pi_mat))
grid_sigma_mat_new = np.dot(id_cov_mat_new, np.dot(transformation_matrix.transpose(), self._grid_sigma_mat))
if self._has_grid:
grid_z_mat_new = np.dot(id_cov_mat_new, np.dot(transformation_matrix.transpose(), self._grid_z_mat))
# TODO:
# keep rebinned instances
if keep_results:
self._cov_mat_new = np.copy(cov_mat_new)
self._data_mat_new = np.copy(data_mat_new)
self._grid_pi_mat_new = np.copy(grid_pi_mat_new)
self._grid_sigma_mat_new = np.copy(grid_sigma_mat_new)
if self._has_grid:
self._grid_z_mat_new = np.copy(grid_z_mat_new)
if rebin_grid:
if self._has_grid:
return (cov_mat_new,
data_mat_new,
grid_pi_mat_new,
grid_sigma_mat_new,
grid_z_mat_new)
else:
return (cov_mat_new,
data_mat_new,
grid_pi_mat_new,
grid_sigma_mat_new,
np.array([]))
else:
return (cov_mat_new, data_mat_new)
def whichBins(self, sigma_min, sigma_max, rebinned = False):
"""
Determines which bins have perpendicular separations between sigma_min and sigma_max
FUNCTION: CorrData.whichBins
TYPE: Public
PURPOSE:
Determines which bins have perpendicular separations between sigma_min and sigma_max.
Bins with perpendicular separations equal to sigma_min are included and bins with
perpendicular separations equal to sigma_max are excluded.
ARGUMENTS:
sigma_min (float): Minimum value of perpendicular separation to include a bin in the
return list
sigma_max (float): Maximum value of perpendicular separation to include a bin in the
return list.
KEYWORD ARGUMENTS:
rebinned (bool): If True, the rebinned matrixes will be considered instead. If the
rebinned matrixes do not contain grid information, then this flag
is ignored. -- Default: False
RETURNS: a np.where output with the position of the selected bins
EXAMPLES:
pos = data.whichBins(1.0, 3.0)
pos = data.whichBins(1.0, 3.0, rebinned=True)
"""
# check parameters' types
if not (type(sigma_min) == float or isinstance(sigma_min, np.float64)):
raise CorrDataError(self.whichBins, 'Incorrect type of the parameter "sigma_min".')
if not (type(sigma_max) == float or isinstance(sigma_max, np.float64)):
raise CorrDataError(self.whichBins, 'Incorrect type of the parameter "sigma_max".')
if not (type(rebinned) == bool):
raise CorrDataError(self.whichBins, 'Incorrect type of the parameter "rebinned".')
# check parameters' consistency
try:
assert sigma_max > sigma_min
except AssertionError:
raise CorrDataError(self.whichBins, 'sigma_max is lower than sigma_min')
if rebinned:
if hasattr(self, '_grid_sigma_mat_new'):
return np.where((self._grid_sigma_mat_new >= sigma_min) & (self._grid_sigma_mat_new < sigma_max))
return np.where((self._grid_sigma_mat >= sigma_min) & (self._grid_sigma_mat < sigma_max))
def cov_mat(self, rebinned = False):
"""
Returns a copy of _cov_mat
FUNCTION: CorrData.cov_mat
TYPE: Access
PURPOSE: Returns a copy of _cov_mat
KEYWORD ARGUMENTS:
rebinned: if True, returns a copy of _cov_mat_new instead. If _cov_mat_new does not exist,
then this flag is ignored
RETURNS: A copy of _cov_mat
EXAMPLES:
cov_mat = data.cov_mat()
cov_mat = data.cov_mat(rebinned=True)
"""
if rebinned:
if hasattr(self, '_cov_mat_new'):
return np.copy(self._cov_mat_new)
return np.copy(self._cov_mat)
def data_mat(self, rebinned = False):
"""
Returns a copy of _data_mat
FUNCTION: CorrData.data_mat
TYPE: Access
PURPOSE: Returns a copy of _data_mat
KEYWORD ARGUMENTS:
rebinned: if True, returns a copy of _data_mat_new instead. If _data_mat_new does not exist,
then this flag is ignored
RETURNS: A copy of _data_mat
EXAMPLES:
data_mat = data.data_mat()
data_mat = data.data_mat(rebinned=True)
"""
if rebinned:
if hasattr(self, '_data_mat_new'):
return np.copy(self._data_mat_new)
return np.copy(self._data_mat)
def error(self, rebinned = False):
"""
Returns the square root of the diagonal elements of _cov_mat
FUNCTION: CorrData.error()
TYPE: Access
PURPOSE: Returns the square root of the diagonal elements of _cov_mat
KEYWORD ARGUMENTS:
rebinned: if True, uses _cov_mat_new instead. If _cov_mat_new does
not exist, then this flag is ignored
RETURNS: The square root of the diagonal elements of _cov_mat
EXAMPLES:
error = data.error()
error = data.error(rebinned=True)
"""
if rebinned:
if hasattr(self, '_cov_mat_new'):
return np.sqrt(np.array([ self._cov_mat_new[index1, index1] for index1 in range(0, self._cov_mat_new.shape[0]) ]))
return np.sqrt(np.array([ self._cov_mat[index1, index1] for index1 in range(0, self._cov_mat.shape[0]) ]))
def grid_pi_mat(self, rebinned = False):
"""
Returns a copy of _grid_pi_mat
FUNCTION: CorrData.grid_pi_mat()
TYPE: Access
PURPOSE: Returns a copy of _grid_pi_mat
KEYWORD ARGUMENTS:
rebinned: if True, returns a copy of _grid_pi_mat_new instead. If _grid_pi_mat_new does
not exist, then this flag is ignored
RETURNS: A copy of _grid_pi_mat
EXAMPLES:
grid_pi_mat = data.grid_pi_mat()
grid_pi_mat = data.grid_pi_mat(rebinned=True)
"""
if rebinned:
if hasattr(self, '_grid_pi_mat_new'):
return np.copy(self._grid_pi_mat_new)
return np.copy(self._grid_pi_mat)
def grid_sigma_mat(self, rebinned = False):
"""
Returns a copy of _grid_sigma_mat
FUNCTION: CorrData.grid_sigma_mat()
TYPE: Access
PURPOSE: Returns a copy of _grid_sigma_mat
KEYWORD ARGUMENTS:
rebinned: if True, returns a copy of _grid_sigma_mat_new instead. If _grid_sigma_mat_new does
not exist, then this flag is ignored
RETURNS: A copy of _grid_sigma_mat
EXAMPLES:
grid_sigma_mat = data.grid_sigma_mat()
grid_sigma_mat = data.grid_sigma_mat(rebinned=True)
"""
if rebinned:
if hasattr(self, '_grid_sigma_mat_new'):
return np.copy(self._grid_sigma_mat_new)
return np.copy(self._grid_sigma_mat)
def has_grid(self):
"""
Returns the current value of _has_grid
FUNCTION: CorrData.has_grid
TYPE: Access
PURPOSE: Returns the current value of _has_grid
RETURNS: current value of _has_grid
EXAMPLES:
has_grid = data.has_grid()
"""
return self._has_grid
def pi(self, rebinned = False):
"""
Returns a copy of _pi
FUNCTION: CorrData.pi()
TYPE: Access
PURPOSE: Returns a copy of _pi
KEYWORD ARGUMENTS:
rebinned: if True, returns a copy of _pi_new instead. If _pi_new does not exist,
then this flag is ignored
RETURNS: A copy of _pi
EXAMPLES:
pi = data.pi()
pi = data.pi(rebinned=True)
"""
if rebinned:
if hasattr(self, '_pi_new'):
return np.copy(self._pi_new)
return np.copy(self._pi)
def sigma(self, rebinned = False):
"""
Returns a copy of _sigma
FUNCTION: CorrData.sigma()
TYPE: Access
PURPOSE: Returns a copy of _sigma
KEYWORD ARGUMENTS:
rebinned: if True, returns a copy of _sigma_new instead. If _sigma_new does not exist,
then this flag is ignored
RETURNS: A copy of _sigma
EXAMPLES:
sigma = data.sigma()
sigma = data.sigma(rebinned=True)
"""
if rebinned:
if hasattr(self, '_sigma_new'):
return np.copy(self._sigma_new)
return np.copy(self._sigma)
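# --- Illustrative sketch (not part of the original module) ---
# A minimal example of the intended CorrData workflow: read a measurement,
# build a transformation matrix to a coarser binning and rebin following
# C_new^{-1} = S^T C^{-1} S and data_new = C_new S^T C^{-1} data.
# The file name "my_measurement" is hypothetical; the .data and .cov files must exist.
def _example_corrdata_usage():
    pi = np.arange(-80, 80 + 2, 2)
    sigma = np.arange(0, 80 + 2, 2)
    data = CorrData("my_measurement", pi, sigma)
    # rebin to bins twice as wide
    transf = data.computeTransformationMatrix(np.arange(-80, 80 + 4, 4),
                                              np.arange(0, 80 + 4, 4), keep_matrix=True)
    cov_new, data_new, pi_new, sigma_new, z_new = data.rebinData(transf, keep_results=True)
    # select the rebinned bins with 1 <= sigma < 3
    pos = data.whichBins(1.0, 3.0, rebinned=True)
    return data_new[pos], data.error(rebinned=True)[pos]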
class CorrDataWarning(Warning):
"""
Manage warnings related to CorrData
CLASS: CorrDataWarning
PURPOSE:
Manage warnings related to CorrData
PUBLIC FUNCTIONS:
__init__(method, message)
__str__()
PRIVATE VARIABLES (should not be accessed outside the class body):
_method (CorrData method): Method of CorrData that produced the warning
_message (string): Warning message
"""
def __init__(self, method, message):
"""
Initialize class instance
FUNCTION: CorrDataWarning.__init__
TYPE: Constructor, public
PURPOSE:
Initialize class instance
ARGUMENTS:
method (CorrData method): Method of CorrData that produced the warning
message (string): The warning message
RETURNS:
An initialized instance of CorrDataWarning
EXAMPLES:
warning = CorrDataWarning("this is a warning")
"""
self._method = method.__name__
self._message = message
def __str__(self):
"""
Returns a printable representation of the warning message
FUNCTION: CorrDataWarning.__str__
TYPE: Public
PURPOSE:
Returns a printable representation of the warning message
RETURNS:
A printable representation of the warning message
EXAMPLES:
print CorrDataWarning("this is a warning")
"""
return 'In method {method}: {message}'.format(method=self._method, message=repr(self._message))
class CorrDataError(Exception):
"""
CorrData Exception
CLASS: CorrDataError
PURPOSE: CorrData Exception
PUBLIC FUNCTIONS:
__init__(method, message)
__str__()
PRIVATE VARIABLES (should not be accessed outside the class body):
_method (CorrData method): Method of CorrData that produced the error
_message (string): Error message
"""
def __init__(self, method, message):
"""
Initialize class instance
FUNCTION: CorrDataError.__init__
TYPE: Constructor, public
PURPOSE: Initialize class instance
ARGUMENTS:
method (CorrData method): Method of CorrData that produced the error
message (string): The error message
RETURNS:
An initialized instance of CorrDataError
EXAMPLES:
error = CorrDataError("this is an error")
"""
self._method = method.__name__
self._message = message
def __str__(self):
"""
Returns a printable representation of the error message
FUNCTION: CorrDataError.__str__
TYPE: Public
PURPOSE: Returns a printable representation of the error message
RETURNS:
A printable representation of the error message
EXAMPLES:
print CorrDataError("this is an error")
"""
return 'In method {method}: {message}'.format(method=self._method, message=repr(self._message))
class CorrModel(object):
"""
Manage the models generated by the baofit code.
CLASS: CorrModel
PURPOSE:
Manage the models generated by the baofit code
PUBLIC METHODS:
__init__(filename)
computeTransformationMatrix(new_pi, new_sigma, keep_matrix=False)
rebinModel(new_pi, new_sigma, keep_results=False)
whichBins(sigma_min, sigma_max, rmin, subset='all', rebinned=False)
model_mat(rebinned=False)
index_mat(rebinned=False)
grid_pi_mat(rebinned=False)
grid_sigma_mat(rebinned=False)
PRIVATE METHODS (should not be called except by class methods):
_formatData(self)
PRIVATE VARIABLES (should not be accessed except by class methods):
_model (structured np.ndarray): Contains the contents of the baofit residuals files
_model_mat (np.ndarray): Matrix form of the model
_data_mat (np.ndarray): Matrix form of the data read along with the model
_index_mat (np.ndarray): Matrix form of the indexes used in the modelling
_grid_pi_mat (np.ndarray): Matrix form of the values of parallel separation
_grid_sigma_mat (np.ndarray): Matrix form of the values of perpendicular separation
_grid_z_mat (np.ndarray): Matrix form of the values of the redshift
_transformatiom_matrix (np.ndarray): An array containing the conversion to the last specified
binning
_model_mat_new (np.ndarray): An array containing the rebinned version of _model_mat
_data_mat_new (np.ndarray): An array containing the rebinned version of _data_mat
_grid_pi_mat_new (np.ndarray): An array containing the rebinned version of _grid_pi_mat.
Ignored if _has_grid is False
_grid_sigma_mat_new (np.ndarray): An array containing the rebinned version of _grid_sigma_mat.
Ignored if _has_grid is False
_grid_z_mat_new (np.ndarray): An array containing the rebinned version of _grid_z_mat.
Ignored if _has_grid is False
ASSOCIATED WARNING AND ERROR CLASSES:
CorrModelError
"""
def __init__(self, filename):
"""
Initialize class instance.
FUNCTION: CorrModel.__init__
TYPE: Constructor, public
PURPOSE:
Initialize class instance
ARGUMENTS:
filename (string): Name of the file containing the data to process (must end with 'residuals.dat')
RETURNS:
An initialized instance of CorrModel
EXCEPTION SAFETY:
Raises a CorrModelError instance if the arguments are of incorrect type or if data is not readable.
EXAMPLES:
model = CorrModel("my_fit_residuals.dat")
"""
# check parameters' types
if not (type(filename) == str):
raise CorrDataError(self.__init__, 'Incorrect type of the parameter "filename". Could not generate instance')
# check parameters' consistency
try:
assert filename[-13:] == 'residuals.dat'
except AssertionError:
raise CorrModelError(self.__init__, 'Incorrect value of the parameter "filename": it must end with "residuals.dat". Could not generate instance')
# read model
try:
self._model = np.genfromtxt(filename, dtype=[('index', int),
('pi', float),
('sigma', float),
('z', float),
('model', float),
('data', float),
('error', float)], usecols=(0, 1, 2, 3, 7, 8, 9))
except IOError as ioerror:
raise CorrModelError(self.__init__, str(ioerror).split(']')[-1])
# format model into matrix
self._formatData()
def _formatData(self):
"""
Formats current data into operable matrixes.
FUNCTION: CorrModel._formatData
TYPE: Private (to be accessed only from class member functions)
PURPOSE:
Formats current data into operable matrixes. Formatted matrixes are stored as new
instance members and are not linked with the original arrays.
EXAMPLES:
self._formatData()
"""
self._index_mat = np.copy(self._model['index'])
self._model_mat = np.copy(self._model['model'])
self._data_mat = np.copy(self._model['data'])
self._grid_pi_mat = np.copy(self._model['pi'])
self._grid_sigma_mat = np.copy(self._model['sigma'])
self._grid_z_mat = np.copy(self._model['z'])
def computeTransformationMatrix(self, new_pi, new_sigma, keep_matrix = False):
"""
Compute the transformation matrix from the model bins to the given bins.
FUNCTION: CorrModel.computeTransformationMatrix
TYPE: Public (deprecated)
PURPOSE:
Compute the transformation matrix from the model bins to the specified bins. The bins must be
wider than the original bins. Depending on the options, it may store the resulting matrix as an instance
attribute. Previous similar instances are deleted.
ARGUMENTS:
new_pi (np.ndarray): An array specifying the limits of the parallel separation bins
new_sigma (np.ndarray): An array specifying the limits of the perpendicular separation bins
KEYWORD ARGUMENTS:
keep_matrix (boolean): If True, keeps the resulting transformation matrix as a private instance
attribute -- Default: False
RETURNS:
The transformation matrix between the model bins to the specified bins
EXCEPTION SAFETY:
Raises a CorrModelError instance if the parameters don't have the correct type or if the
specified bins are smaller than the originals.
EXAMPLES:
model.computeTransformationMatrix(np.arange(-80, 80 + 4, 4), np.arange(0, 80+4, 4))
model.computeTransformationMatrix(np.arange(-80, 80 + 4, 4), np.arange(0, 80+4, 4), keep_matrix=True)
model.computeTransformationMatrix(np.arange(-80, 80 + 4, 4), np.arange(0, 80+4, 4), keep_matrix=False)
"""
# check parameters' types
if not (isinstance(new_pi, np.ndarray)):
raise CorrModelError(self.computeTransformationMatrix, 'Incorrect type of the parameter "new_pi".')
if not isinstance(new_sigma, np.ndarray):
raise CorrModelError(self.computeTransformationMatrix, 'Incorrect type of the parameter "new_sigma".')
if not (type(keep_matrix) == bool):
raise CorrModelError(self.computeTransformationMatrix, 'Incorrect type of the parameter "keep_matrix".')
# check parameters' consistency
try:
assert new_pi.size * new_sigma.size <= np.amax(self._index_mat)
except AssertionError:
raise CorrModelError(self.computeTransformationMatrix, 'Given bins are smaller than the originals.')
# delete previous transformation instances if existent
if hasattr(self, '_transformatiom_matrix'):
del self._transformatiom_matrix
transformation_matrix = np.zeros((self._model_mat.size, (new_pi.size - 1) * (new_sigma.size - 1)))
# compute the overall relation between old and new bins
k_conv = {}
for k in range(0, self._grid_pi_mat.size):
k_pi_new = 0
try:
while not (new_pi[k_pi_new] <= self._grid_pi_mat[k] and self._grid_pi_mat[k] <= new_pi[k_pi_new + 1]):
k_pi_new += 1
assert k_pi_new < new_pi.size - 1
except AssertionError:
continue
k_sigma_new = 0
try:
while not (new_sigma[k_sigma_new] <= self._grid_sigma_mat[k] and self._grid_sigma_mat[k] <= new_sigma[k_sigma_new + 1]):
k_sigma_new += 1
assert k_sigma_new < new_sigma.size - 1
except AssertionError:
continue
k_conv[k] = k_pi_new * (new_sigma.size - 1) + k_sigma_new
# then fill the transformation matrix
for old, new in k_conv.items():
transformation_matrix[old, new] = 1.0
# keep transformation instances
if keep_matrix:
self._transformatiom_matrix = np.copy(transformation_matrix)
return transformation_matrix
def rebinModel(self, new_pi, new_sigma, keep_results = False):
"""
Rebins the model to the specified binning
FUNCTION: CorrModel.rebinModel
TYPE: Public
PURPOSE:
Rebins the model to the specified binning. The bins must be
wider than the original bins. The rebinning is performed by averaging
all the values of the original bins that fall into the new bin.
Depending on the options, the method may store the resulting matrixes as instance attributes.
Previous similar instances are deleted.
ARGUMENTS:
new_pi (np.ndarray): An array specifying the limits of the parallel separation bins
new_sigma (np.ndarray): An array specifying the limits of the perpendicular separation bins
KEYWORD ARGUMENTS:
keep_results (boolean): If True, keeps the resulting rebinned matrixes as private instance
attributes -- Default: False
RETURNS:
The rebinned matrixes. The order is model_mat_new, data_mat_new, grid_pi_mat_new,
grid_sigma_mat_new, grid_z_mat_new, index_mat_new
EXCEPTION SAFETY:
Raises a CorrModelError instance if the parameters don't have the correct type or if the specified
bins are smaller than the originals.
EXAMPLES:
model_mat_new, data_mat_new, grid_pi_mat_new,
grid_sigma_mat_new, grid_z_mat_new, index_mat_new = model.rebinModel(pi, sigma)
model_mat_new, data_mat_new, grid_pi_mat_new,
grid_sigma_mat_new, grid_z_mat_new, index_mat_new = model.rebinModel(pi, sigma, keep_results=True)
model_mat_new, data_mat_new, grid_pi_mat_new,
grid_sigma_mat_new, grid_z_mat_new, index_mat_new = model.rebinModel(pi, sigma, keep_results=False)
"""
# check parameters' types
if not (isinstance(new_pi, np.ndarray)):
raise CorrModelError(self.rebinModel, 'Incorrect type of the parameter "new_pi".')
if not isinstance(new_sigma, np.ndarray):
raise CorrModelError(self.rebinModel, 'Incorrect type of the parameter "new_sigma".')
if not (type(keep_results) == bool):
raise CorrModelError(self.rebinModel, 'Incorrect type of the parameter "keep_results".')
# delete previous rebinned instances if present
if hasattr(self, '_model_mat_new'):
del self._model_mat_new
if hasattr(self, '_data_mat_new'):
del self._data_mat_new
if hasattr(self, '_grid_pi_mat_new'):
del self._grid_pi_mat_new
if hasattr(self, '_grid_sigma_mat_new'):
del self._grid_sigma_mat_new
if hasattr(self, '_grid_z_mat_new'):
del self._grid_z_mat_new
# initialize lists
index_mat_new = []
model_mat_new = []
data_mat_new = []
grid_pi_mat_new = []
grid_sigma_mat_new = []
grid_z_mat_new = []
# loop over new bins
for pi_index in range(new_pi.size - 1):
for sigma_index in range(new_sigma.size - 1):
# initialize variables
counts_new = 0.0
model_new = 0.0
data_new = 0.0
grid_pi_new = 0.0
grid_sigma_new = 0.0
grid_z_new = 0.0
# loop over old bins
for (model, data, grid_pi, grid_sigma, grid_z) in zip(self._model_mat, self._data_mat, self._grid_pi_mat,
self._grid_sigma_mat, self._grid_z_mat):
# check if old bin falls inside the new one
if (grid_pi >= new_pi[pi_index] and grid_pi <= new_pi[pi_index + 1] and grid_sigma >= new_sigma[sigma_index]
and grid_sigma <= new_sigma[sigma_index + 1]):
counts_new += 1.0
model_new += model
data_new += data
grid_pi_new += grid_pi
grid_sigma_new += grid_sigma
grid_z_new += grid_z
# normalize and add to list
if counts_new != 0.0:
index_mat_new.append(pi_index*(new_sigma.size - 1) + sigma_index)
model_mat_new.append(model_new/counts_new)
data_mat_new.append(data_new/counts_new)
grid_pi_mat_new.append(grid_pi_new/counts_new)
grid_sigma_mat_new.append(grid_sigma_new/counts_new)
grid_z_mat_new.append(grid_z_new/counts_new)
# recast list to ndarrays
index_mat_new = np.array(index_mat_new)
model_mat_new = np.array(model_mat_new)
data_mat_new = np.array(data_mat_new)
grid_pi_mat_new = np.array(grid_pi_mat_new)
grid_sigma_mat_new = np.array(grid_sigma_mat_new)
grid_z_mat_new = np.array(grid_z_mat_new)
# keep rebinned instances
if keep_results:
self._index_mat_new = np.copy(index_mat_new)
self._model_mat_new = np.copy(model_mat_new)
self._data_mat_new = np.copy(data_mat_new)
self._grid_pi_mat_new = np.copy(grid_pi_mat_new)
self._grid_sigma_mat_new = np.copy(grid_sigma_mat_new)
self._grid_z_mat_new = np.copy(grid_z_mat_new)
return (model_mat_new, data_mat_new, grid_pi_mat_new, grid_sigma_mat_new, grid_z_mat_new, index_mat_new)
def whichBins(self, sigma_min, sigma_max, rmin, subset='all', rebinned = False):
"""
Determines which bins have perpendicular separations between sigma_min and sigma_max
FUNCTION: CorrModel.whichBins
TYPE: Public
PURPOSE:
Determines which bins have perpendicular separations between sigma_min and sigma_max,
and have r bigger or equal to rmin.
Bins with perpendicular separations equal to sigma_min are included and bins with
perpendicular separations equal to sigma_max are excluded. Depending on the options, only bins
with positive or negative values of pi are included.
ARGUMENTS:
sigma_min (float): Minimum value of perpendicular separation to include a bin in the
return list
sigma_max (float): Maximum value of perpendicular separation to include a bin in the
return list.
rmin (float): Minimum r for the bin to be included
KEYWORD ARGUMENTS:
subset (string): A string specifying whether or not there is an additional constraint
on the value of the parallel separation. It can have the values
"pos" (includes only bins with positive or zero parallel separations),
"neg" (includes only bins with negative or zero parallel separations),
or "all" (no additional constrain) -- Default: "all"
rebinned (bool): If True, the rebinned matrixes will be considered instead. If the
rebinned matrixes do not contain grid information, then this flag
is ignored. -- Default: False
RETURNS: a np.where output with the position of the selected bins
EXAMPLES:
pos = model.whichBins(1.0, 3.0, 0.0)
pos = model.whichBins(1.0, 3.0, 0.0, rebinned=True)
pos = model.whichBins(1.0, 3.0, 0.0, subset="neg")
"""
# check parameters' types
if not (type(sigma_min) == float or isinstance(sigma_min, np.float64)):
raise CorrModelError(self.whichBins, 'Incorrect type of the parameter "sigma_min".')
if not (type(sigma_max) == float or isinstance(sigma_max, np.float64)):
raise CorrModelError(self.whichBins, 'Incorrect type of the parameter "sigma_max".')
if not (type(rmin) == float or isinstance(rmin, np.float64)):
raise CorrModelError(self.whichBins, 'Incorrect type of the parameter "rmin".')
if not (type(subset) == str):
raise CorrModelError(self.whichBins, 'Incorrect type of the parameter "subset".')
if not (subset == "pos" or subset == "neg" or subset == "all"):
raise CorrModelError(self.whichBins, 'Incorrect type of the parameter "subset".')
if not (type(rebinned) == bool):
raise CorrModelError(self.whichBins, 'Incorrect type of the parameter "rebinned".')
# check parameters consistency
if not (sigma_max > sigma_min):
raise CorrModelError(self.whichBins, 'sigma_max is lower than sigma_min')
if not (rmin >= 0.0):
raise CorrModelError(self.whichBins, 'rmin is lower than 0')
if rebinned:
if hasattr(self, '_grid_sigma_mat_new') and hasattr(self, '_grid_pi_mat_new'):
if subset == "all":
return np.where((self._grid_sigma_mat_new >= sigma_min) & (self._grid_sigma_mat_new < sigma_max) & (self._grid_sigma_mat_new*self._grid_sigma_mat_new + self._grid_pi_mat_new*self._grid_pi_mat_new >= rmin*rmin))
elif subset == "pos":
return np.where((self._grid_sigma_mat_new >= sigma_min) & (self._grid_sigma_mat_new < sigma_max) & (self._grid_pi_mat_new >= 0) & (self._grid_sigma_mat_new*self._grid_sigma_mat_new + self._grid_pi_mat_new*self._grid_pi_mat_new >= rmin*rmin))
else:
return np.where((self._grid_sigma_mat_new >= sigma_min) & (self._grid_sigma_mat_new < sigma_max) & (self._grid_pi_mat_new <= 0) & (self._grid_sigma_mat_new*self._grid_sigma_mat_new + self._grid_pi_mat_new*self._grid_pi_mat_new >= rmin*rmin))
if subset == "all":
return np.where((self._grid_sigma_mat >= sigma_min) & (self._grid_sigma_mat < sigma_max) & (self._grid_sigma_mat*self._grid_sigma_mat + self._grid_pi_mat*self._grid_pi_mat >= rmin*rmin))
elif subset == "pos":
return np.where((self._grid_sigma_mat >= sigma_min) & (self._grid_sigma_mat < sigma_max) & (self._grid_pi_mat >= 0) & (self._grid_sigma_mat*self._grid_sigma_mat + self._grid_pi_mat*self._grid_pi_mat >= rmin*rmin))
else:
return np.where((self._grid_sigma_mat >= sigma_min) & (self._grid_sigma_mat < sigma_max) & (self._grid_pi_mat <= 0) & (self._grid_sigma_mat*self._grid_sigma_mat + self._grid_pi_mat*self._grid_pi_mat >= rmin*rmin))
def index_mat(self, rebinned = False):
"""
Returns a copy of _index_mat
FUNCTION: CorrModel.index_mat
TYPE: Access
PURPOSE: Returns a copy of _index_mat
KEYWORD ARGUMENTS:
rebinned: if True, returns a copy of _index_mat_new instead. If _index_mat_new does not exist,
then this flag is ignored
RETURNS: A copy of _index_mat
EXAMPLES:
index_mat = model.index_mat()
index_mat = model.index_mat(rebinned=True)
"""
if rebinned:
if hasattr(self, '_index_mat_new'):
return np.copy(self._index_mat_new)
return np.copy(self._index_mat)
def grid_pi_mat(self, rebinned = False):
"""
Returns a copy of _grid_pi_mat
FUNCTION: CorrModel.grid_pi_mat
TYPE: Access
PURPOSE: Returns a copy of _grid_pi_mat
KEYWORD ARGUMENTS:
rebinned: if True, returns a copy of _grid_pi_mat_new instead. If _grid_pi_mat_new does
not exist, then this flag is ignored
RETURNS: A copy of _grid_pi_mat
EXAMPLES:
grid_pi_mat = model.grid_pi_mat()
grid_pi_mat = model.grid_pi_mat(rebinned=True)
"""
if rebinned:
if hasattr(self, '_grid_pi_mat_new'):
return np.copy(self._grid_pi_mat_new)
return np.copy(self._grid_pi_mat)
def grid_sigma_mat(self, rebinned = False):
"""
Returns a copy of _grid_sigma_mat
FUNCTION: CorrModel.grid_sigma_mat
TYPE: Access
PURPOSE: Returns a copy of _grid_sigma_mat
KEYWORD ARGUMENTS:
rebinned: if True, returns a copy of _grid_sigma_mat_new instead. If _grid_sigma_mat_new does
not exist, then this flag is ignored
RETURNS: A copy of _grid_sigma_mat
EXAMPLES:
grid_sigma_mat = model.grid_sigma_mat()
grid_sigma_mat = model.grid_sigma_mat(rebinned=True)
"""
if rebinned:
if hasattr(self, '_grid_sigma_mat_new'):
return np.copy(self._grid_sigma_mat_new)
return np.copy(self._grid_sigma_mat)
def model_mat(self, rebinned = False):
"""
Returns a copy of _model_mat
FUNCTION: CorrModel.model_mat
TYPE: Access
PURPOSE: Returns a copy of _model_mat
KEYWORD ARGUMENTS:
rebinned: if True, returns a copy of _model_mat_new instead. If _model_mat_new does not exist,
then this flag is ignored
RETURNS: A copy of _model_mat
EXAMPLES:
model_mat = model.model_mat()
model_mat = model.model_mat(rebinned=True)
"""
if rebinned:
if hasattr(self, '_model_mat_new'):
return np.copy(self._model_mat_new)
return np.copy(self._model_mat)
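# --- Illustrative sketch (not part of the original module) ---
# A minimal example of the intended CorrModel workflow: read a baofit residuals
# file, rebin the model by averaging, and select bins by perpendicular separation.
# The file name "my_fit_residuals.dat" is hypothetical.
def _example_corrmodel_usage():
    model = CorrModel("my_fit_residuals.dat")
    (model_new, data_new, pi_new, sigma_new,
     z_new, index_new) = model.rebinModel(np.arange(-80, 80 + 4, 4),
                                          np.arange(0, 80 + 4, 4), keep_results=True)
    # rebinned bins with 1 <= sigma < 3, r >= 10 and positive parallel separation
    pos = model.whichBins(1.0, 3.0, 10.0, subset="pos", rebinned=True)
    return model_new[pos]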
class CorrModelError(Exception):
"""
CorrModel Exception
CLASS: CorrModelError
PURPOSE: CorrModel Exception
PUBLIC FUNCTIONS:
__init__(method, message)
__str__()
PRIVATE VARIABLES (should not be accessed outside the class body):
_method (CorrModel method): Method of CorrModel that produced the error
_message (string): Error message
"""
def __init__(self, method, message):
"""
Initialize class instance
FUNCTION: CorrModelError.__init__
TYPE: Constructor, public
PURPOSE: Initialize class instance
ARGUMENTS:
method (CorrModel method): Method of CorrModel that produced the error
message (string): The error message
RETURNS:
An initialized instance of CorrModelError
EXAMPLES:
error = CorrModelError("this is an error")
"""
self._method = method.__name__
self._message = message
def __str__(self):
"""
Returns a printable representation of the error message
FUNCTION: CorrModelError.__str__
TYPE: Public
PURPOSE: Returns a printable representation of the error message
RETURNS:
A printable representation of the error message
EXAMPLES:
print CorrModelError("this is an error")
"""
return 'In method {method}: {message}'.format(method=self._method, message=repr(self._message))
class CorrelationProcessError(Exception):
"""
Handles exceptions caused by the functions in the module CorrelationProcess.
CLASS: CorrelationProcessError
PURPOSE:
Handles exceptions caused by the functions in the module CorrelationProcess.
The errors caused inside the classes of this module have their own Exception
PUBLIC FUNCTIONS:
__init__(self, function, message)
__str__(self)
PRIVATE VARIABLES (should not be accessed outside the class body):
_method (module function): Function that produced the error
_message (string): Error message
"""
def __init__(self, function, message):
"""
Initialize class instance
FUNCTION: CorrelationProcessError.__init__
TYPE: Constructor, public
PURPOSE: Initialize class instance
ARGUMENTS:
method (module function): Function that produced the error
message (string): The error message
RETURNS:
An initialized instance of CorrelationProcessError
EXAMPLES:
error = CorrelationProcessError("this is an error")
"""
self._function = function.__name__
self._message = message
def __str__(self):
"""
Returns a printable representation of the error message
FUNCTION: CorrelationProcessError.__str__
TYPE: Public
PURPOSE: Returns a printable representation of the error message
RETURNS:
A printable representation of the error message
EXAMPLES:
print CorrelationProcessError("this is an error")
"""
return 'In function {function}: {message}'.format(function=self._function, message=repr(self._message))
class CorrProcessWarning(Warning):
"""
Manage warnings related to functions in the module CorrelationProcess.
CLASS: CorrProcessWarning
PURPOSE:
Manage warnings related to functions in the module CorrelationProcess.
PUBLIC FUNCTIONS:
__init__(method, message)
__str__()
PRIVATE VARIABLES (should not be accessed outside the class body):
_method (module function): Function that produced the warning
_message (string): Warning message
"""
def __init__(self, method, message):
"""
Initialize class instance
FUNCTION: CorrProcessWarning.__init__
TYPE: Constructor, public
PURPOSE:
Initialize class instance
ARGUMENTS:
method (module function): Function that produced the warning
message (string): The warning message
RETURNS:
An initialized instance of CorrProcessWarning
EXAMPLES:
warning = CorrProcessWarning("this is a warning")
"""
self._method = method.__name__
self._message = message
def __str__(self):
"""
Returns a printable representation of the warning message
FUNCTION: CorrProcessWarning.__str__
TYPE: Public
PURPOSE:
Returns a printable representation of the warning message
RETURNS:
A printable representation of the warning message
EXAMPLES:
print CorrProcessWarning("this is a warning")
"""
return 'In method {method}: {message}'.format(method=self._method, message=repr(self._message))
def computeCovMatFromPlates(filename="my_measurement.cov", path_to_covariance_plates="./", save_to="../"):
"""
Computes the covariance matrix from the covariance matrixes computed in the different plates
FUNCTION: computeCovMatFromPlates
TYPE: Regular function
PURPOSE:
Computes the covariance matrix from the covariance matrixes computed in the different plates.
More specifically reads the list of plates that were computed, loads all the covariance matrixes,
and computes their inverse. The inverse of the full covariance matrix is computed as the sum
of all the inverses of the covariance matrixes in each of the plates.
The function assumes that the covariance matrixes for each of the plates are located in the same
folder where the code is being executed, and saves the full covariance matrix in its parent folder.
These specifications may change depending on the options (see KEYWORD_ARGUMENTS), but all the files to be read
must be in the same directory
KEYWORD_ARGUMENTS:
filename (string): Name of the file where the covariance matrix will be saved.
Extension must be ".cov" -- Default: "my_measurement.cov"
path_to_covariance_plates (string): The name of the path where the covariance matrix files for the
measurement on the different plates are found. -- Default: "./"
save_to (string): Name of the path where the full covariance matrix will be saved.
-- Default: "../"
EXCEPTION_SAFETY:
Raises a CorrelationProcessError instance if the arguments are of incorrect type
EXAMPLES:
computeCovMatFromPlates()
computeCovMatFromPlates(filename="my_measurement.cov")
computeCovMatFromPlates(path_to_covariance_plates="./")
computeCovMatFromPlates(filename="my_measurement.cov", path_to_covariance_plates="./")
computeCovMatFromPlates(save_to="../")
computeCovMatFromPlates(filename="my_measurement.cov", save_to="../")
computeCovMatFromPlates(path_to_covariance_plates="./", save_to="../")
computeCovMatFromPlates(filename="my_measurement.cov", path_to_covariance_plates="./", save_to="../")
"""
# check parameters' types
if not (type(filename) == str):
raise CorrelationProcessError(computeCovMatFromPlates, 'Incorrect type of the parameter "filename".')
if not (filename.endwith(".cov")):
raise CorrelationProcessError(computeCovMatFromPlates, 'Incorrect type of the parameter "filename".')
if not (type(path_to_covariance_plates) == str):
raise CorrelationProcessError(computeCovMatFromPlates, 'Incorrect type of the parameter "path_to_covariance_plates".')
if not (type(save_to) == str):
raise CorrelationProcessError(computeCovMatFromPlates, 'Incorrect type of the parameter "save_to".')
# get plates list
plate_filename_list = [file for file in os.listdir(path_to_covariance_plates) if file.endwith(".cov")]
plate_filename_list_size = len(plate_filename_list)
# loop over plates
for count, plate_filename in enumerate(plate_filename_list):
if count % 100 == 0:
print "loadded {:d} plates out of {:d}".format(count, plate_filename_list_size)
# load inverse covariance matrixes from file
inv_cov_mat_plate = invCovMatFromPlate(plate_filename)
# add to inverse full covariance matrix
if count == 0:
inv_cov_mat = np.copy(inv_cov_mat_plate)
else:
inv_cov_mat += inv_cov_mat_plate
# compute the full covariance matrix
try:
cov_mat = np.linalg.inv(inv_cov_mat)
except np.linalg.LinAlgError:
warnings.warn(CorrProcessWarning(computeCovMatFromPlates, 'The full covariance matrix was singular, attempting \
to invert it ignoring lines full of zeros. Consider adding more plates to the \
average'))
remove_lines = np.array([index for index, item in enumerate(inv_cov_mat) if np.all(item == 0.0)])
keep_lines = np.array([index for index in range(inv_cov_mat.shape[0]) if index not in remove_lines])
inv_cov_mat_reduced = inv_cov_mat[np.meshgrid(keep_lines, keep_lines)]
# compute inverse
cov_mat_reduced = np.linalg.inv(inv_cov_mat_reduced)
# reintroduce lines and columns full of zeros
cov_mat = np.zeros_like(inv_cov_mat)
cov_mat[np.meshgrid(keep_lines, keep_lines)] = cov_mat_reduced
# save the full covariance matrix
file = open(save_to + filename, "w")
for i in range(cov_mat.shape[0]):
for j in range(i, cov_mat.shape[1]):
file.write("{:d} {:d} {:f}\n".format(i, j, cov_mat[i,j]))
file.close()
return
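# --- Illustrative sketch (not part of the original module) ---
# Toy demonstration of the combination rule used by computeCovMatFromPlates:
# the inverse of the full covariance matrix is the sum of the per-plate inverse
# covariance matrices. The two 2x2 matrices below are made up.
def _example_covariance_combination():
    cov_plate1 = np.array([[2.0, 0.5], [0.5, 1.0]])
    cov_plate2 = np.array([[1.5, 0.2], [0.2, 0.8]])
    inv_cov_full = np.linalg.inv(cov_plate1) + np.linalg.inv(cov_plate2)
    return np.linalg.inv(inv_cov_full)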
def computeDistMatFromPlates(filename="my_measurement.dmat", path_to_covariance_plates="./", save_to="../"):
"""
Computes the distortion matrix from the distortion matrixes computed in the different plates
FUNCTION: computeDistMatFromPlates
TYPE: Regular function
PURPOSE:
Computes the distortion matrix from the distortion matrixes computed in the different plates.
More specifically reads the list of plates that were computed and loads all the distortion matrixes.
The full distortion matrix is computed as the sum of all the distortion matrixes in each of the plates,
and is then normalized according to the total weight
The function assumes that the distortion matrixes for each of the plates are located in the same
folder where the code is being executed, and saves the full distortion matrix in its parent folder.
These specifications may change depending on the options (see KEYWORD_ARGUMENTS), but all the files to be read
must be in the same directory
KEYWORD_ARGUMENTS:
filename (string): Name of the file where the distortion matrix will be saved.
Extension must be ".dmat" -- Default: "my_measurement.dmat"
path_to_covariance_plates (string): The name of the path where the distortion matrix files for the
measurement on the different plates are found. -- Default: "./"
save_to (string): Name of the path where the full distortion matrix will be saved.
-- Default: "../"
EXCEPTION_SAFETY:
Raises a CorrelationProcessError instance if the arguments are of incorrect type
EXAMPLES:
computeDistMatFromPlates()
computeDistMatFromPlates(filename="my_measurement.dmat")
computeDistMatFromPlates(path_to_covariance_plates="./")
computeDistMatFromPlates(filename="my_measurement.dmat", path_to_covariance_plates="./")
computeDistMatFromPlates(save_to="../")
computeDistMatFromPlates(filename="my_measurement.dmat", save_to="../")
computeDistMatFromPlates(path_to_covariance_plates="./", save_to="../")
computeDistMatFromPlates(filename="my_measurement.dmat", path_to_covariance_plates="./", save_to="../")
"""
# check parameters' types
if not (type(filename) == str):
raise CorrelationProcessError(computeDistMatFromPlates, 'Incorrect type of the parameter "filename".')
if not (filename.endwith(".dmat")):
raise CorrelationProcessError(computeDistMatFromPlates, 'Incorrect type of the parameter "filename".')
if not (type(path_to_covariance_plates) == str):
raise CorrelationProcessError(computeDistMatFromPlates, 'Incorrect type of the parameter "path_to_covariance_plates".')
if not (type(save_to) == str):
raise CorrelationProcessError(computeDistMatFromPlates, 'Incorrect type of the parameter "save_to".')
# get plates list
plate_filename_list = [file for file in os.listdir(path_to_covariance_plates) if file.endwith(".dmat")]
plate_filename_list_size = len(plate_filename_list)
# loop over plates
for count, plate_filename in enumerate(plate_filename_list):
if count % 100 == 0:
print "loadded {:d} plates out of {:d}".format(count, plate_filename_list_size)
# load distortion matrix from file
dist_mat_plate, weight_plate = distMatFromPlate(plate_filename)
# add to full distortion matrix
if count == 0:
dist_mat = np.copy(dist_mat_plate)
weight = np.copy(weight_plate)
else:
inv_cov_mat["value"] += inv_cov_mat_plate["value"]
weight["value"] += weight_plate["value"]
# normalize the full distortion matrix
warn = False
for index in dist_mat["index1"]:
w = weight[np.where(dist_mat["index1" == index])]["value"]
if w.size == 1 and w != 0.0:
dist_mat[np.where(dist_mat["index1" == index])]["value"] /= w
elif w.size != 1:
raise CorrelationProcessError(computeDistMatFromPlates, 'Encountered zero or more than one value for the normalization factor for \
bin {}. At least one of the distortion matrix files is not properly formatted'.format(index))
else:
warn = True
if warn:
warnings.warn(CorrProcessWarning(computeDistMatFromPlates, 'The full distortion matrix contains bins with no information. \
Consider adding more plates to the average'))
# save the full distortion matrix
file = open(save_to + filename, "w")
# dist_mat is a structured array with fields index1, index2 and value
for index1, index2, value in dist_mat:
file.write("{:d} {:d} {:f}\n".format(index1, index2, value))
file.close()
return
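# --- Illustrative sketch (not part of the original module) ---
# Toy demonstration of the normalization step in computeDistMatFromPlates:
# after summing the per-plate distortion matrices, the entries of every row
# (fixed index1) are divided by the accumulated weight of that bin.
# The arrays below are made up.
def _example_distortion_normalization():
    dist_mat = np.array([(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0)],
                        dtype=[("index1", int), ("index2", int), ("value", float)])
    weight = np.array([(0, 2.0), (1, 3.0)], dtype=[("index1", int), ("value", float)])
    for index, w in zip(weight["index1"], weight["value"]):
        if w != 0.0:
            dist_mat["value"][dist_mat["index1"] == index] /= w
    return dist_mat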
def distMatFromPlate(filename):
"""
Reads the distortion matrix in a specific plate
FUNCTION: distMatFromPlate
TYPE: Regular function
PURPOSE:
Reads the distortion matrix in a specific plate and returns its values and weight.
ARGUMENTS:
filename (string): The name of the file containing the distortion matrix. Extension
must be ".dmaat"
RETURNS:
The distortion matrix and its weight
EXCEPTION_SAFETY:
Raises a CorrelationProcessError instance if the arguments are of incorrect type
EXAMPLES:
dist_mat_plate, weight_plate = distMatFromPlate("plate_3678.dmat")
"""
# check parameters' types
if not (type(filename) == str):
raise CorrelationProcessError(distMatFromPlate, 'Incorrect type of the parameter "filename".')
if not (filename.endswith(".dmat")):
raise CorrelationProcessError(distMatFromPlate, 'Incorrect type of the parameter "filename".')
dist_mat = []
weight = []
read_dmat = False
read_weight = False
for line in open(filename):
line = line.strip()
if not line:
continue
if line.startswith("#"):
if line.endswith("dmat"):
read_dmat = True
read_weight = False
elif line.endswith("weight"):
read_dmat = False
read_weight = True
continue
# data rows are "index1 index2 value" for the distortion matrix and "index1 value" for the weight
cols = line.split()
if read_dmat:
dist_mat.append((int(cols[0]), int(cols[1]), float(cols[2])))
if read_weight:
weight.append((int(cols[0]), float(cols[1])))
dist_mat = np.array(dist_mat, dtype=[("index1", int), ("index2", int), ("value", float)])
weight = np.array(weight, dtype=[("index1", int), ("value", float)])
return dist_mat, weight
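# Illustrative sketch, not part of the original module: writes a toy plate file with
# the layout distMatFromPlate expects (a comment header ending in "dmat" followed by
# "index1 index2 value" rows, then a comment header ending in "weight" followed by
# "index1 value" rows) and reads it back. The file name and the values are made up.
def _exampleDistMatFromPlate():
    toy_file = open("toy_plate.dmat", "w")
    toy_file.write("# dmat\n")
    toy_file.write("0 0 1.0\n")
    toy_file.write("0 1 0.5\n")
    toy_file.write("# weight\n")
    toy_file.write("0 2.0\n")
    toy_file.close()
    dist_mat, weight = distMatFromPlate("toy_plate.dmat")
    return dist_mat, weight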
def invCovMatFromPlate(filename):
"""
Reads the covariance matrix in a specific plate and returns its inverse
FUNCTION: invCovMatFromPlate
TYPE: Regular function
PURPOSE:
Reads the covariance matrix in a specific plate and returns its inverse. Whenever
there is no info from one of the bins (for example if there are no pairs that
contribute to bin 0), then this bin is not considered when computing the inverse
of the covariance matrix.
ARGUMENTS:
filename (string): The name of the file containing the covariance matrix to
compute the inverse from. Extension must be ".cov"
RETURNS:
The inverse of the covariance matrix in the specified plate
EXCEPTION_SAFETY:
Raises a CorrelationProcessError instance if the arguments are of incorrect type
EXAMPLES:
inv_cov_mat = invCovMatFromPlate("plate_3678.cov")
"""
# check parameters' types
if not (type(filename) == str):
raise CorrelationProcessError(invCovMatFromPlate, 'Incorrect type of the parameter "filename".')
if not (filename.endswith(".cov")):
raise CorrelationProcessError(invCovMatFromPlate, 'Incorrect type of the parameter "filename".')
# read covariance matrix
cov = np.genfromtxt(filename, dtype=[("index1", int), ("index2", int), ("value", float)])
# format read data into a matrix
cov_mat = np.zeros((np.amax([cov['index1'], cov['index2']]) + 1, np.amax([cov['index1'], cov['index2']]) + 1))
for index1, index2, value in cov:
cov_mat[index1][index2] = value
cov_mat[index2][index1] = value
# remove lines and columns full of zeros
remove_lines = np.array([index for index, item in enumerate(cov_mat) if not np.any(item)])
keep_lines = np.array([index for index in range(cov_mat.shape[0]) if index not in remove_lines])
cov_mat_reduced = cov_mat[np.ix_(keep_lines, keep_lines)]
# compute inverse
inv_cov_mat_reduced = np.linalg.inv(cov_mat_reduced)
# reintroduce lines and columns full of zeros
inv_cov_mat = np.zeros((np.amax([cov['index1'], cov['index2']]) + 1, np.amax([cov['index1'], cov['index2']]) + 1))
inv_cov_mat[np.ix_(keep_lines, keep_lines)] = inv_cov_mat_reduced
return inv_cov_mat
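# Illustrative sketch, not part of the original module: mimics the zero-row/column
# handling of invCovMatFromPlate on a toy symmetric matrix. Bin 1 carries no
# information, so it is dropped before inverting and its row/column stay at zero in
# the returned inverse. The matrix values are made up; the module-level numpy import
# (np) is reused.
def _exampleInvCovIgnoringEmptyBins():
    toy_cov_mat = np.array([[2.0, 0.0, 0.5],
                            [0.0, 0.0, 0.0],
                            [0.5, 0.0, 1.0]])
    keep_lines = np.array([index for index in range(toy_cov_mat.shape[0])
                           if np.any(toy_cov_mat[index])])
    # invert only the informative sub-matrix
    reduced = toy_cov_mat[np.ix_(keep_lines, keep_lines)]
    inv_reduced = np.linalg.inv(reduced)
    # reinsert the empty bin as zeros
    toy_inv_cov_mat = np.zeros_like(toy_cov_mat)
    toy_inv_cov_mat[np.ix_(keep_lines, keep_lines)] = inv_reduced
    return toy_inv_cov_mat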
def plot(data_list, save_to, fmt_list = "k.", model_list=[], fmt_model_list = [], sigma_bins = None, contour = False, plot_rebinned_list = False, plot_model_rebinned_list = False, labels_list = None, labels_model_list = None, shifts_list = None, save_extension = 'eps', base_fig_name = 'cross_correlation', rmin_list= 10.0, smooth = True, plot_separated_errors = False, error_pos_list = [], single_plot=False, max_sigma_plot=40.0, max_pi_plot=60.0):
"""
Plots the cross-correlation
FUNCTION: plot
TYPE: Regular function
PURPOSE:
Plots the cross-correlation against parallel separation in different perpendicular
separation bins. If a list of sigma bins is not specified, then the sigma bins are
taken from the sigma_bins in the first dataset. If contour is set to True, then it
plots color-coded cross-correlation contours against parallel and perpendicular
separations. This function assumes that data and model instances have the same binning
independently of whether or not they are rebinned. Unexpected behaviour might occur if
this condition is not met.
ARGUMENTS:
data_list (CorrData, list of CorrData,
or tuple of CorrData): Data to plot. The format is either a
single CorrData instance or a list of
CorrData instances. If contour is set
to True, then it has to be a single
CorrData instance.
save_to (string): Directory where plots will be saved.
KEYWORD_ARGUMENTS:
fmt_list (string, list of strings,
or tuple of strings): Format in which to plot the different
datasets. Its number of elements must be
equal to the number of elements in
data_list. Each element must contain a
valid matplotlib-format string. Ignored
if contour is set to True. -- Default: "k."
model_list (CorrModel, list of CorrModel,
or tuple of CorrModel): Models to plot. The format is either a
single CorrModel instance or a list of
CorrModel instances. If contour is set
to True, then it has to be a single
CorrModel instance.
fmt_model_list (string, list of strings,
or tuple of strings): Format in which to plot the different
models. Its number of elements must be
equal to the number of elements in
model_list. Each element must contain a
valid matplotlib-format string. Ignored
if contour is set to True.
sigma_bins (np.ndarray): Bins in perpendicular separation in which to
plot the cross-correlation. If None, then
the binning will be taken from the first
dataset -- Default: None
contour (bool): If True, plots the contour plots instead
of plots in different sigma bins, sigma_bins
is ignored, and only one dataset and one model
are allowed. Formats (fmt_list and fmt_model_list)
are also ignored. plot_rebinned_list and
plot_model_rebinned_list also have to contain a
single element. Labels and shifts, if present,
are ignored. rmin_list is ignored -- Default: False
plot_rebinned_list (bool, list of bool,
tuple of bool): If True, plots the rebinned data of the
dataset. If a single value is passed for
multiple datasets, then this value will be
used for all of them. Otherwise there must be
as many values as datasets are provided
If the dataset has no rebinned data, the flag
is ignored. -- Default: False
plot_model_rebinned_list (bool,
list of bool, tuple of bool): If True, plots the rebinned arrays of the
model. If a single value is passed for
multiple models, then this value will be
used for all of them. Otherwise there must be
as many values as models are provided
If the model has no rebinned data, the flag
is ignored. Ignored if contour is set to True.
-- Default: False
labels_list (None, string, list of
strings, or tuple of strings): Name of the different datasets. Its number of
elements must be equal to the number of elements
in data_list. Otherwise it has to be None, and no
labels will be displayed on the plots.
labels_model_list (None, string,
list of strings, or tuple of
strings): Name of the different models. Its number of
elements must be equal to the number of elements
in model_list. Otherwise it has to be None, and no
labels will be displayed on the plots. Ignored if
contour is set to True.
shifts_list (None, float, list of
floats, or tuple of floats): Shifts to be applied to the different datasets.
Its number of elements must be equal to the number
of elements in data_list. Otherwise it has to be
None, and no shifts will be applied to the plots.
Ignored if contour is set to True.
save_extension (string): Image extension of the files (eps, png, ...)
--- Default: "eps"
base_fig_name (string): Image base name. Full name appends '_sigma_bin#.format',
where # is replaced by the corresponding bin number,
and format is replaced by the value stored in
save_extension. If contour is set to True, then full
name appends '_contour.format' instead.
--- Default: "cross_correlation"
rmin_list (float, int, list of floats
or ints, tuple of floats or ints): Minimum distances (in Mpc/h) considered by the fitting
models. Its number of elements must be equal to the
number of elements in model_list. Otherwise it can
only have 1 element, which will be considered for all
the models. All values must be positive. Ignored if
contour is set to True. --- Default: 10.0
smooth (bool): If True, averages the datapoints at large distances.
For 16 < r <= 32 the average is performed with the
immediately adjacent bins, and for 32 < r the average
is performed with the two adjacent bins. Otherwise does
nothing. Ignored if contour is set to False.
--- Default: True
plot_separated_errors (bool): If True, plots the average errors outside the plot
chart at the positions specified by error_pos_list.
The datapoints are plotted without errorbars. Otherwise
does nothing. Ignored if contour is set to True.
--- Default: False
error_pos_list (float, list of floats or
tuple of floats): Position to plot the average errorbars relative to the
axis size. Its number of elements must be equal to the
number of elements in data_list. All values must be between
0.0 and 1.0. Ignored if contour is set to True or if
plot_separated_errors is set to False. --- Default: []
single_plot (bool): If True, save all the plots into a single figure
as well as individually. Otherwise, just save them
individually. Ignored if contour is set to True.
Plots on a single figure ignore the value of
plot_separated_errors. --- Default: False
max_sigma_plot (float): Maximum perpendicular distance to plot when plotting
the contour plots. Must be positive and is ignored if
contour is set to False. --- Default: 40
max_pi_plot (float): Maximum parallel distance to plot when plotting
the contour plots. Must be positive and is ignored if
contour is set to False. --- Default: 60
EXCEPTION_SAFETY:
Raises a CorrelationProcessError instance if the arguments are of incorrect type,
or arguments are not consistent. Assumes formats are given in valid matplotlib-format
strings.
EXAMPLES:
plot(data, "./")
plot(data, "./", fmt_list="k.", sigma_bins=sigma, plot_rebinned_list=False, model_list = model, fmt_model_list = "k-", plot_model_rebinned_list=False, base_fig_name="cross_correlation_not_rebinned", rmin=5)
plot(data, "./", fmt_list="k.", sigma_bins=sigma_new, plot_rebinned_list=True, model_list = model, fmt_model_list = "k-", plot_model_rebinned_list = True, rmin=5)
plot(data, "./", model_list=model, plot_rebinned_list=False, plot_model_rebinned_list=False, contour=True)
"""
# check parameters' types
if not (isinstance(data_list, CorrData) or type(data_list) == list or type(data_list) == tuple):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "data_list".')
if type(data_list) == list or type(data_list) == tuple:
for item in data_list:
if not (isinstance(item, CorrData)):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "data_list".')
if not (type(save_to) == str):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "save_to".')
if not (type(fmt_list) == str or type(fmt_list) == list or type(fmt_list) == tuple or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "fmt_list".')
if (type(fmt_list) == list or type(fmt_list) == tuple) and (not contour):
for item in fmt_list:
if not (type(item) == str):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "fmt_list".')
if not (isinstance(model_list, CorrModel) or type(model_list) == list or type(model_list) == tuple or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "model_list".')
if (type(model_list) == list or type(model_list) == tuple) and (not contour):
for item in model_list:
if not (isinstance(item, CorrModel)):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "model_list".')
if not (type(fmt_model_list) == str or type(fmt_model_list) == list or type(fmt_model_list) == tuple or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "fmt_model_list".')
if (type(fmt_model_list) == list or type(fmt_model_list) == tuple) and (not contour):
for item in fmt_model_list:
if not (type(item) == str):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "fmt_model_list".')
if not (sigma_bins is None or isinstance(sigma_bins, np.ndarray)):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "sigma_bins".')
if not (type(contour) == bool):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "contour".')
if not (type(plot_rebinned_list) == bool or type(plot_rebinned_list) == list or type(plot_rebinned_list) == tuple):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "plot_rebinned_list".')
if type(plot_rebinned_list) == list or type(plot_rebinned_list) == tuple:
for item in plot_rebinned_list:
if not (type(item) == bool):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "plot_rebinned_list".')
if not (type(plot_model_rebinned_list) == bool or type(plot_model_rebinned_list) == list or type(plot_model_rebinned_list) == tuple):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "plot_model_rebinned_list".')
if type(plot_model_rebinned_list) == list or type(plot_model_rebinned_list) == tuple:
for item in plot_model_rebinned_list:
if not (type(item) == bool):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "plot_model_rebinned_list".')
if not (labels_list == None or type(labels_list) == str or type(labels_list) == list or type(labels_list) == tuple):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "labels_list".')
if type(labels_list) == list or type(labels_list) == tuple:
for item in labels_list:
if not (type(item) == str):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "labels_list".')
if not (labels_model_list == None or type(labels_model_list) == str or type(labels_model_list) == list
or type(labels_model_list) == tuple or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "labels_model_list".')
if type(labels_model_list) == list or type(labels_model_list) == tuple:
for item in labels_model_list:
if not (type(item) == str or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "labels_model_list.')
if not (shifts_list == None or type(shifts_list) == float or type(shifts_list) == list or type(shifts_list) == tuple or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "shifts_list".')
if type(shifts_list) == list or type(shifts_list) == tuple:
for item in shifts_list:
if not (type(item) == float or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "shifts_list".')
if not (type(save_extension) == str):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "save_extension".')
if not (type(base_fig_name) == str):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "base_fig_name".')
if not (type(rmin_list) == float or type(rmin_list) == int or type(rmin_list) == list or type(rmin_list) == tuple or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "rmin_list".')
if type(rmin_list) == list or type(rmin_list) == tuple:
for item in rmin_list:
if not (type(item) == int or type(item) == float or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "rmin_list".')
if not (item >= 0.0 or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "rmin_list".')
else:
if not (rmin_list >= 0.0 or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "rmin_list".')
if not (type(smooth) == bool or (not contour)):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "smooth".')
if not (type(plot_separated_errors) == bool or contour):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "plot_separated_errors".')
if not (type(error_pos_list) == float or type(error_pos_list) == list or type(error_pos_list) == tuple or contour or (not plot_separated_errors)):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "error_pos_list".')
if (type(error_pos_list) == list or type(error_pos_list) == tuple):
for item in error_pos_list:
if not (type(item) == float or contour or (not plot_separated_errors)):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "error_pos_list".')
if not (type(single_plot) == bool):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "single_plot".')
if not (type(max_sigma_plot) == float):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "max_sigma_plot".')
if not (type(max_pi_plot) == float):
raise CorrelationProcessError(plot, 'Incorrect type of the parameter "max_pi_plot".')
# check parameters' consistency
try:
if contour:
# convert all relevant parameters to elements if necessary
if type(data_list) == list or type(data_list) == tuple:
if not (len(data_list) == 1):
raise CorrelationProcessError(plot, 'Parameter "data_list" is not consistent with the requirements.')
data_list = data_list[0]
if type(plot_rebinned_list) == list or type(plot_rebinned_list) == tuple:
if not (len(plot_rebinned_list) == 1):
raise CorrelationProcessError(plot, 'Parameter "plot_rebinned_list" is not consistent with the requirements.')
plot_rebinned_list = plot_rebinned_list[0]
if type(model_list) == list or type(model_list) == tuple:
if not (len(model_list) == 1):
raise CorrelationProcessError(plot, 'Parameter "model_list" is not consistent with the requirements.')
model_list = model_list[0]
if type(plot_model_rebinned_list) == list or type(plot_model_rebinned_list) == tuple:
if not (len(plot_model_rebinned_list) == 1):
raise CorrelationProcessError(plot, 'Parameter "plot_model_rebinned_list" is not consistent with the requirements.')
plot_model_rebinned_list = plot_model_rebinned_list[0]
if max_sigma_plot <= 0.0:
raise CorrelationProcessError(plot, 'Parameter "max_sigma_plot" is not consistent with the requirements.')
if max_pi_plot <= 0.0:
raise CorrelationProcessError(plot, 'Parameter "max_pi_plot" is not consistent with the requirements.')
else:
# convert all relevant parameters to one element lists if necessary
if isinstance(data_list, CorrData):
data_list = [data_list]
if type(fmt_list) == str:
fmt_list = [fmt_list]
if isinstance(model_list, CorrModel):
model_list = [model_list]
if type(fmt_model_list) == str:
fmt_model_list = [fmt_model_list]
if type(plot_rebinned_list) == bool:
plot_rebinned_list = [plot_rebinned_list]
if type(plot_model_rebinned_list) == bool:
plot_model_rebinned_list = [plot_model_rebinned_list]
if type(labels_list) == str:
labels_list = [labels_list]
if type(labels_model_list) == str:
labels_model_list = [labels_model_list]
if type(shifts_list) == float:
shifts_list = [shifts_list]
if type(rmin_list) == int or type(rmin_list) == float:
rmin_list = [float(rmin_list)]
if type(rmin_list) == tuple or type(rmin_list) == list:
rmin_list = [float(item) for item in rmin_list]
if type(error_pos_list) == float and plot_separated_errors:
error_pos_list = [error_pos_list]
# check that all lists have the appropriate length and correct for it in special cases
# (check parameters description)
assert len(fmt_list) == 1 or len(fmt_list) == len(data_list)
if len(fmt_list) == 1:
fmt_list = [fmt_list[0]] * len(data_list)
assert len(fmt_model_list) == 1 or len(fmt_model_list) == len(model_list)
if len(fmt_model_list) == 1:
fmt_model_list = [fmt_model_list[0]] * len(model_list)
assert len(plot_rebinned_list) == 1 or len(plot_rebinned_list) == len(data_list)
if len(plot_rebinned_list) == 1:
plot_rebinned_list = [plot_rebinned_list[0]] * len(data_list)
assert len(plot_model_rebinned_list) == 1 or len(plot_model_rebinned_list) == len(model_list)
if len(plot_model_rebinned_list) == 1:
plot_model_rebinned_list = [plot_model_rebinned_list[0]] * len(model_list)
assert labels_list == None or len(labels_list) == 1 or len(labels_list) == len(data_list)
if labels_list != None and len(labels_list) == 1:
labels_list = [labels_list[0]] * len(data_list)
assert labels_model_list == None or len(labels_model_list) == 1 or len(labels_model_list) == len(model_list)
if labels_model_list != None and len(labels_model_list) == 1:
labels_model_list = [labels_model_list[0]] * len(model_list)
assert shifts_list == None or len(shifts_list) == 1 or len(shifts_list) == len(data_list)
if shifts_list != None and len(shifts_list) == 1:
shifts_list = [shifts_list[0]] * len(data_list)
elif shifts_list == None:
shifts_list = [0.0] * len(data_list)
if sigma_bins is None:
sigma_bins = data_list[0].sigma(rebinned=plot_rebinned_list[0])
assert len(rmin_list) == 1 or len(rmin_list) == len(model_list)
if len(rmin_list) == 1:
rmin_list = [rmin_list[0]] * len(model_list)
if plot_separated_errors:
assert len(error_pos_list) == len(data_list)
except AssertionError:
raise CorrelationProcessError(plot, 'Given parameters are not consistent.')
if contour:
# get pi and sigma limits from the CorrData instance
pi_bins = data_list.pi(rebinned=plot_rebinned_list)
sigma_bins = data_list.sigma(rebinned=plot_rebinned_list)
# compute the middle point in the bins
pi_mid_bins = np.array([(pi_bins[i]+pi_bins[i + 1])/2.0 for i in range(pi_bins.size - 1)])
sigma_mid_bins = np.array([(sigma_bins[i]+sigma_bins[i + 1])/2.0 for i in range(sigma_bins.size - 1)])
# compute the two-dimensional shape of the matrixes
shape = (pi_mid_bins.size, sigma_mid_bins.size)
# format the data and error matrixes to a two-dimensional array
data_values = data_list.data_mat(rebinned=plot_rebinned_list)
if not (shape[0]*shape[1] == data_values.size):
raise CorrelationProcessError(plot, '"data" does not have the right number of elements, expected: {}, found: {}. If using the rebinning, check that all the relevant matrixes were stored'.format(shape[0]*shape[1], data_values.size))
data_values = data_values.reshape(shape)
error_values = data_list.error(rebinned=plot_rebinned_list)
if not (shape[0]*shape[1] == error_values.size):
raise CorrelationProcessError(plot, '"error" does not have the right number of elements, expected: {}, found: {}. If using the rebinning, check that all the relevant matrixes were stored'.format(shape[0]*shape[1], error_values.size))
error_values = error_values.reshape(shape)
# fill the voids in the model
aux = 0
model_values = []
for (index, model) in zip(model_list.index_mat(rebinned=plot_model_rebinned_list), model_list.model_mat(rebinned=plot_model_rebinned_list)):
while (aux < index):
model_values.append(np.nan)
aux += 1
model_values.append(model)
aux += 1
while aux < shape[0]*shape[1]:
model_values.append(np.nan)
aux += 1
# format the model matrix to a two-dimensional array
model_values = np.array(model_values).reshape(shape)
# smooth the datapoints
if smooth:
# average at large distances
data_averaged = np.copy(data_values)
for pi_index, pi in enumerate(pi_mid_bins):
for sigma_index, sigma in enumerate(sigma_mid_bins):
if np.isnan(data_values[pi_index][sigma_index]):
continue
r2 = pi*pi+sigma*sigma
# do nothing for r <= 16
if r2 <= 16.0*16.0:
pass
# for 16 < r <= 32 we average with the immediately adjacent bins; weight the bins with 1/(error+0.05)
elif r2 <= 32.0*32.0:
v = 0.0
e = 0.0
for index1 in range(-1, 2):
for index2 in range(-1,2):
try:
if (not np.isnan(data_values[pi_index + index1][sigma_index + index2])) and (not np.isnan(error_values[pi_index + index1][sigma_index + index2])):
v += data_values[pi_index + index1][sigma_index + index2]/(error_values[pi_index + index1][sigma_index + index2]+0.05)
e += 1.0/(error_values[pi_index + index1][sigma_index + index2]+0.05)
except IndexError:
pass
if e > 0.0:
v /= e
if v != 0.0:
data_averaged[pi_index][sigma_index] = v
# for 32 < r we average with the two adjacent bins; weight the bins with 1/(error+0.05)
else:
v = 0.0
e = 0.0
for index1 in range(-2,3):
for index2 in range(-2,3):
try:
if (not np.isnan(data_values[pi_index + index1][sigma_index + index2])) and (not np.isnan(error_values[pi_index + index1][sigma_index + index2])):
v += data_values[pi_index + index1][sigma_index + index2]/(error_values[pi_index + index1][sigma_index + index2]+0.05)
e += 1.0/(error_values[pi_index + index1][sigma_index + index2]+0.05)
except IndexError:
pass
if e > 0.0:
v /= e
if v != 0.0:
data_averaged[pi_index][sigma_index] = v
# figure settings
cmap = plt.cm.get_cmap('CMRmap')
num_colors = 40.0
vmin = np.amin(data_values)
vmax = np.amax(data_values)
step = (vmax - vmin) / num_colors
levels = np.arange(vmin, vmax + step, step)
#gs = gridspec.GridSpec(2, 3, width_ratios=[10, 10, 1], height_ratios=[2, 1])
gs = gridspec.GridSpec(1, 3, width_ratios=[10, 10, 1])
gs.update(bottom=0.2, wspace=0.05, hspace=0.08)
fontsize = 32
labelsize = 24
labelsize2 = 18
figsize=(18, 14)
#figsize=(20, 20)
# plot the data
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(gs[0,0])
ax.set_xlabel('$r_{\\perp} {\\rm \\left[h^{-1}Mpc\\right]}$', fontsize=fontsize)
ax.set_ylabel('$r_{\\parallel} {\\rm \\left[h^{-1}Mpc\\right]}$', fontsize=fontsize)
ax.tick_params(axis='both', pad=10, labelsize=labelsize, width=2, length=6, top=True, right=True)
if smooth:
cs = ax.contourf(sigma_mid_bins, pi_mid_bins, data_averaged, levels, cmap=cmap, vmin=vmin, vmax=vmax, fontsize=labelsize)
else:
cs = ax.contourf(sigma_mid_bins, pi_mid_bins, data_values, levels, cmap=cmap, vmin=vmin, vmax=vmax, fontsize=labelsize)
ax.set_xlim(0, max_sigma_plot)
ax.set_ylim(-max_pi_plot, max_pi_plot)
xticks = ax.xaxis.get_major_ticks()
xticks[0].label1.set_visible(False)
xticks[-1].label1.set_visible(False)
ax2 = fig.add_subplot(gs[0,1])
ax2.set_xlabel('$r_{\\perp} {\\rm \\left[h^{-1}Mpc\\right]}$', fontsize=fontsize)
ax2.tick_params(axis='x', pad=10, labelsize=labelsize, width=2, length=6, top=True)
ax2.tick_params(axis='y', labelleft='off', width=2, length=6, right=True)
cs2 = ax2.contourf(sigma_mid_bins, pi_mid_bins, model_values, levels, cmap=cmap, vmin=vmin, vmax=vmax, fontsize=labelsize)
ax2.set_xlim(0, max_sigma_plot)
ax2.set_ylim(-max_pi_plot, max_pi_plot)
xticks2 = ax2.xaxis.get_major_ticks()
xticks2[0].label1.set_visible(False)
xticks2[-1].label1.set_visible(False)
ax3 = fig.add_subplot(gs[0,2])
cbar = fig.colorbar(cs, cax=ax3, format='%.2f')
cbar.ax.tick_params(axis='both', labelsize=labelsize, width=2, length=6, pad=10)
# save the plot
fig.savefig('{}{}_contour.{}'.format(save_to, base_fig_name, save_extension))
plt.close(fig)
# plot the cross-correlation in different sigma bins
else:
# figure settings
if plot_separated_errors:
gs = gridspec.GridSpec(1, 2, width_ratios=[10,1])
gs.update(left=0.25, bottom=0.2, wspace=0.01)
else:
gs = gridspec.GridSpec(1, 1)
gs.update(left=0.25, bottom=0.2)
major_locator = MultipleLocator(20)
minor_locator = MultipleLocator(10)
fontsize = 32
labelsize = 24
figsize = (9, 7)
# single figure settings
if single_plot:
figsize_singleplot = (18, 7*sigma_bins.size//2)
gs_singleplot = gridspec.GridSpec(sigma_bins.size//2, 2)
gs_singleplot.update(left=0.15, bottom=0.15, hspace=0, wspace=0.3)
fig_singleplot = plt.figure(figsize=figsize_singleplot)
axes_singleplot = np.empty((sigma_bins.size//2, 2), dtype=matplotlib.axes.Axes)
for bin_num, sigma_min in enumerate(sigma_bins[:-1]):
sigma_max = sigma_bins[bin_num + 1]
print 'plotting sigma bin {}'.format(bin_num)
pos_list = [ data.whichBins(sigma_min, sigma_max, rebinned=plot_rebinned) for data, plot_rebinned in zip(data_list, plot_rebinned_list) ]
# check if the model is continuous or discontinuous in this region
model_pos_list = []
for model, plot_rebinned, rmin in zip(model_list, plot_model_rebinned_list, rmin_list):
if sigma_min >= rmin:
model_pos_list.append([model.whichBins(sigma_min, sigma_max, rmin, rebinned=plot_rebinned, subset="all")])
else:
model_pos_list.append([model.whichBins(sigma_min, sigma_max, rmin, rebinned=plot_rebinned, subset="pos") ,
model.whichBins(sigma_min, sigma_max, rmin, rebinned=plot_rebinned, subset="neg")])
# plot the data
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(gs[0])
# if plot_separated_errors is set to True plot datapoints without errorbars and plot the errors at the specified positions
if plot_separated_errors:
if labels_list == None:
[ ax.plot(data.grid_pi_mat(rebinned=plot_rebinned)[pos] + shift, data.data_mat(rebinned=plot_rebinned)[pos], fmt) for data, fmt, plot_rebinned, pos, shift in zip(data_list, fmt_list, plot_rebinned_list, pos_list, shifts_list) ]
else:
[ ax.plot(data.grid_pi_mat(rebinned=plot_rebinned)[pos] + shift, data.data_mat(rebinned=plot_rebinned)[pos], fmt, label=label) for data, fmt, plot_rebinned, label, pos, shift in zip(data_list, fmt_list, plot_rebinned_list, labels_list, pos_list, shifts_list) ]
# compute errors average and plot them at the specified positions
mean_error_list = [np.sum(data.error(rebinned=plot_rebinned)[pos])/data.error(rebinned=plot_rebinned)[pos].size for data, plot_rebinned, pos in zip(data_list, plot_rebinned_list, pos_list)]
ax2 = fig.add_subplot(gs[1])
[ax2.errorbar(pos, 0.2, yerr=mean_error/(ax.get_ylim()[1]-ax.get_ylim()[0]), fmt=fmt, transform=ax2.transAxes) for mean_error, fmt, pos in zip(mean_error_list, fmt_list, error_pos_list)]
ax2.axis('off')
ax2.tick_params(axis='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labeltop='off', labelleft='off', labelright='off')
ax2.set_ylim(ax.get_ylim())
# otherwise plot datapoints with their errorbars
else:
if labels_list == None:
[ ax.errorbar(data.grid_pi_mat(rebinned=plot_rebinned)[pos] + shift, data.data_mat(rebinned=plot_rebinned)[pos], yerr=data.error(rebinned=plot_rebinned)[pos], fmt=fmt) for data, fmt, plot_rebinned, pos, shift in zip(data_list, fmt_list, plot_rebinned_list, pos_list, shifts_list) ]
else:
[ ax.errorbar(data.grid_pi_mat(rebinned=plot_rebinned)[pos] + shift, data.data_mat(rebinned=plot_rebinned)[pos], yerr=data.error(rebinned=plot_rebinned)[pos], fmt=fmt, label=label) for data, fmt, plot_rebinned, label, pos, shift in zip(data_list, fmt_list, plot_rebinned_list, labels_list, pos_list, shifts_list) ]
# plot the model
if labels_model_list == None:
for model, fmt, plot_rebinned, model_pos in zip(model_list, fmt_model_list, plot_model_rebinned_list, model_pos_list):
[ax.plot(model.grid_pi_mat(rebinned=plot_rebinned)[model_pos_item], model.model_mat(rebinned=plot_rebinned)[model_pos_item], fmt) for model_pos_item in model_pos]
else:
for model, fmt, plot_rebinned, model_pos, label in zip(model_list, fmt_model_list, plot_model_rebinned_list, model_pos_list, labels_model_list):
ax.plot(model.grid_pi_mat(rebinned=plot_rebinned)[model_pos[0]], model.model_mat(rebinned=plot_rebinned)[model_pos[0]], fmt, label=label)
[ ax.plot(model.grid_pi_mat(rebinned=plot_rebinned)[model_pos_item], model.model_mat(rebinned=plot_rebinned)[model_pos_item], fmt) for model_pos_item in model_pos[1:] ]
ax.set_xlabel('$r_{\\parallel}\\,\\left[\\rm h^{-1}Mpc\\right]$', fontsize=fontsize)
ax.set_ylabel('$\\xi\\left[r_{\\parallel}, r_{\\perp}\\right]$', fontsize=fontsize)
ax.text(0.05, 0.05, '$' + str(sigma_min) + ' < r_{\\perp} <' + str(sigma_max) + '$', fontsize=labelsize, transform=ax.transAxes)
ax.tick_params(axis='both', pad=10, labelsize=labelsize, size=labelsize//2, top='on', right='on')
ax.xaxis.set_minor_locator(minor_locator)
ax.xaxis.set_major_locator(major_locator)
yticks = ax.yaxis.get_major_ticks()
yticks[0].label1.set_visible(False)
if labels_list != None or labels_model_list != None:
ax.legend(numpoints=1, loc=4)
# save the plot
fig.savefig('{}{}_sigma_bin_{}.{}'.format(save_to, base_fig_name, bin_num, save_extension))
plt.close(fig)
# plot all of the plots into a single figure if necessary
if single_plot:
# determine position in the figure
subplot_row = bin_num//2
subplot_column = bin_num % 2
axes_singleplot[subplot_row, subplot_column] = fig_singleplot.add_subplot(gs_singleplot[subplot_row, subplot_column])
# plot data
if labels_list == None:
[ axes_singleplot[subplot_row, subplot_column].errorbar(data.grid_pi_mat(rebinned=plot_rebinned)[pos] + shift, data.data_mat(rebinned=plot_rebinned)[pos], yerr=data.error(rebinned=plot_rebinned)[pos], fmt=fmt) for data, fmt, plot_rebinned, pos, shift in zip(data_list, fmt_list, plot_rebinned_list, pos_list, shifts_list) ]
else:
[ axes_singleplot[subplot_row, subplot_column].errorbar(data.grid_pi_mat(rebinned=plot_rebinned)[pos] + shift, data.data_mat(rebinned=plot_rebinned)[pos], yerr=data.error(rebinned=plot_rebinned)[pos], fmt=fmt, label=label) for data, fmt, plot_rebinned, label, pos, shift in zip(data_list, fmt_list, plot_rebinned_list, labels_list, pos_list, shifts_list) ]
# plot model
if labels_model_list == None:
for model, fmt, plot_rebinned, model_pos in zip(model_list, fmt_model_list, plot_model_rebinned_list, model_pos_list):
[axes_singleplot[subplot_row, subplot_column].plot(model.grid_pi_mat(rebinned=plot_rebinned)[model_pos_item], model.model_mat(rebinned=plot_rebinned)[model_pos_item], fmt) for model_pos_item in model_pos]
else:
for model, fmt, plot_rebinned, model_pos, label in zip(model_list, fmt_model_list, plot_model_rebinned_list, model_pos_list, labels_model_list):
axes_singleplot[subplot_row, subplot_column].plot(model.grid_pi_mat(rebinned=plot_rebinned)[model_pos[0]], model.model_mat(rebinned=plot_rebinned)[model_pos[0]], fmt, label=label)
[ axes_singleplot[subplot_row, subplot_column].plot(model.grid_pi_mat(rebinned=plot_rebinned)[model_pos_item], model.model_mat(rebinned=plot_rebinned)[model_pos_item], fmt) for model_pos_item in model_pos[1:] ]
if subplot_row == axes_singleplot.shape[0] - 1:
axes_singleplot[subplot_row, subplot_column].set_xlabel('$r_{\\parallel}\\,\\left[\\rm h^{-1}Mpc\\right]$', fontsize=fontsize)
axes_singleplot[subplot_row, subplot_column].tick_params(axis='both', pad=10, labelsize=labelsize, size=labelsize//2, top='on', right='on', direction='inout')
else:
axes_singleplot[subplot_row, subplot_column].tick_params(axis='both', pad=10, labelsize=labelsize, size=labelsize//2, top='on', right='on', labelbottom='off', direction='inout')
if subplot_column == 0:
axes_singleplot[subplot_row, subplot_column].set_ylabel('$\\xi\\left[r_{\\parallel}, r_{\\perp}\\right]$', fontsize=fontsize)
axes_singleplot[subplot_row, subplot_column].text(0.05, 0.05, '$' + str(sigma_min) + ' < r_{\\perp} <' + str(sigma_max) + '$', fontsize=labelsize, transform=axes_singleplot[subplot_row, subplot_column].transAxes)
axes_singleplot[subplot_row, subplot_column].yaxis.get_major_ticks()[0].label1.set_visible(False)
axes_singleplot[subplot_row, subplot_column].xaxis.set_minor_locator(minor_locator)
axes_singleplot[subplot_row, subplot_column].xaxis.set_major_locator(major_locator)
if labels_list != None or labels_model_list != None:
axes_singleplot[subplot_row, subplot_column].legend(numpoints=1, loc=4)
if single_plot:
fig_singleplot.savefig('{}{}_sigma_bins_all.{}'.format(save_to, base_fig_name, save_extension))
plt.close(fig_singleplot)
def rebinIgnoringCovMat(data, pi, sigma):
"""
Rebins the data to the specified binning ignoring the covariance matrix
FUNCTION: rebinIgnoringCovMat
TYPE: Regular function
PURPOSE:
Rebins the data to the specified binning ignoring the covariance matrix
The rebinning is performed as follows
The covariance matrix is rebinned as
C_{new}^{-1} = S^{t} S,
where S is the specified transformation matrix.
The data is rebinned as
data_{new} = C_{new} S^{t} data
ARGUMENTS:
data (CorrData): Instance of CorrData containing the data to rebin
pi (np.ndarray): New binning for parallel
sigma (np.ndarray): New binning for perpendicular separation
RETURNS: The rebinned data matrix.
EXAMPLES:
data_mat_new = rebinIgnoringCovMat(data, np.array([-10, 10]), np.array([10, 20]))
"""
# check parameters type
if not (isinstance(data, CorrData)):
raise CorrelationProcessError(rebinIgnoringCovMat, 'Incorrect type of the parameter "data".')
if not (isinstance(pi, np.ndarray)):
raise CorrelationProcessError(rebinIgnoringCovMat, 'Incorrect type of the parameter "pi".')
if not (isinstance(sigma, np.ndarray)):
raise CorrelationProcessError(rebinIgnoringCovMat, 'Incorrect type of the parameter "sigma".')
data_mat = data.data_mat()
# compute the transformation matrix
transformation_matrix = data.computeTransformationMatrix(pi, sigma)
# assume identity as original covariance matrix and rebin it
inv_cov_mat_new = np.dot(transformation_matrix.transpose(), transformation_matrix)
cov_mat_new = np.linalg.inv(inv_cov_mat_new)
# rebin data
data_mat_new = np.dot(cov_mat_new, np.dot(transformation_matrix.transpose(), data_mat))
return data_mat_new
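# Illustrative sketch, not part of the original module: the rebinning formulas used
# above, C_new^{-1} = S^t S and data_new = C_new S^t data, applied to a toy case in
# which two pairs of original bins are merged into two new bins. The transformation
# matrix and data vector are made up; the module-level numpy import (np) is reused.
def _exampleRebinFormula():
    transformation_matrix = np.array([[1.0, 0.0],
                                      [1.0, 0.0],
                                      [0.0, 1.0],
                                      [0.0, 1.0]])
    data_mat = np.array([1.0, 3.0, 10.0, 20.0])
    inv_cov_mat_new = np.dot(transformation_matrix.transpose(), transformation_matrix)
    cov_mat_new = np.linalg.inv(inv_cov_mat_new)
    data_mat_new = np.dot(cov_mat_new, np.dot(transformation_matrix.transpose(), data_mat))
    # with this S each new bin is the plain average of its two original bins,
    # i.e. data_mat_new == [2.0, 15.0]
    return data_mat_new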
def showDocumentation(fnc):
"""
Shows the documentation for the selected items(s)
FUNCTION: showDocumentation
TYPE: Regular function
PURPOSE:
Shows the documentation for the selected items(s). Items may be classes, methods,
or functions. If 'all' is passed, prints the documentation of all the classes,
methods and functions. If 'module' is passed, prints the module documentation
ARGUMENTS:
fnc (list or tuple of objects,
objects, 'all' or 'module'): Function(s) the documentation of which is to
be printed
EXAMPLES:
showDocumentation(CorrData)
showDocumentation(CorrData.rebinData)
showDocumentation(showDocumentation)
showDocumentation([CorrData, CorrData.rebinData, showDocumentation])
showDocumentation((CorrData, CorrData.rebinData, showDocumentation))
showDocumentation("module")
showDocumentation("all")
"""
# check parameters' type
if type(fnc) == tuple or type(fnc) == list:
for item in fnc:
if not (inspect.isfunction(item) or inspect.isclass(item) or inspect.ismethod(item)):
raise CorrelationProcessError(showDocumentation, 'Incorrect type of the parameter "fnc".')
elif type(fnc) == str:
if not (fnc == 'all' or fnc == 'module'):
raise CorrelationProcessError(showDocumentation, 'Incorrect type of the parameter "fnc".')
else:
if not (inspect.isfunction(fnc) or inspect.isclass(fnc) or inspect.ismethod(fnc)):
raise CorrelationProcessError(showDocumentation, 'Incorrect type of the parameter "fnc".')
# print the entire documentation
if fnc == 'all':
module = sys.modules[__name__]
functions_list = inspect.getmembers(module, inspect.isfunction)
for item in functions_list:
print '\nDocumentation for {}:\n'.format(item[0])
print trim(item[1].__doc__)
print '\n'
classes_list = inspect.getmembers(module, inspect.isclass)
for class_name, class_object in classes_list:
print '\nDocumentation for {}:\n'.format(class_name)
print trim(class_object.__doc__)
print '\n'
methods_list = inspect.getmembers(class_object, inspect.ismethod)
for item in methods_list:
print '\nDocumentation for {}.{}:\n'.format(class_name, item[0])
print trim(item[1].__doc__)
print '\n'
# print the module documentation
elif fnc == 'module':
module = sys.modules[__name__]
print '\nDocumentation for module {}:\n'.format(module.__name__)
print trim(module.__doc__)
print '\n'
# print the documentation for the selected classes, functions, and methods
elif type(fnc) == tuple or type(fnc) == list:
for item in fnc:
if hasattr(item, 'im_class'):
print '\nDocumentation for {}.{}:\n'.format(item.im_class.__name__, item.__name__)
else:
print '\nDocumentation for {}:\n'.format(item.__name__)
print trim(item.__doc__)
print '\n'
else:
if hasattr(fnc, 'im_class'):
print '\nDocumentation for {}.{}:\n'.format(fnc.im_class.__name__, fnc.__name__)
else:
print '\nDocumentation for {}:\n'.format(fnc.__name__)
print trim(fnc.__doc__)
print '\n'
def trim(docstring):
"""
Format the given docstring to be readable
FUNCTION: trim
TYPE: Regular function
PURPOSE:
Docstring processing tools will strip a uniform amount of indentation from the second
and further lines of the docstring, equal to the minimum indentation of all non-blank
lines after the first line. Any indentation in the first line of the docstring (i.e.,
up to the first newline) is insignificant and removed except for a single indentation
block. Relative indentation of later lines in the docstring is retained. Blank lines
should be removed from the beginning and end of the docstring.
Adapted from https://www.python.org/dev/peps/pep-0257/
ARGUMENTS:
docstring (string): The docstring to be formatted
EXAMPLES:
trim(object.__doc__)
"""
# check parameters' type
if not (type(docstring) == str):
raise CorrelationProcessError(trim, 'Incorrect type of the parameter "docstring".')
if not docstring:
return ''
lines = docstring.expandtabs().splitlines()
indent = sys.maxint
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
trimmed = [lines[0].strip()]
if indent < sys.maxint:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
return ' ' + '\n '.join(trimmed)
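# Illustrative sketch, not part of the original module: trim() applied to a raw,
# indented docstring strips the common leading indentation and the surrounding blank
# lines, following the PEP 257 recipe referenced above. The docstring content is made
# up for the example.
def _exampleTrim():
    raw_docstring = """
    First line of the example docstring.
        An indented detail line.
    Last line.
    """
    return trim(raw_docstring)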
def testRebinning(num_test_groups):
"""
Test the rebinning performed by the functions CorrData.rebinData and rebinIgnoringCovMat
FUNCTION: testRebinning
TYPE: Test function
PURPOSE:
Test the rebinning performed by the functions CorrData.rebinData and rebinIgnoringCovMat.
To do so, the function uses the data found in 'test#.data', 'test#.cov', and 'test#.grid'
in folder 'test_rebinning/' (where # is an integer going from 1 to num_test_groups, both
included). The function performs the rebinnings proposed in 'test#.rebin' and compares
the results against 'test#.sol'.
The format of 'test#.data', 'test#.cov', and 'test#.grid' are the same as the
cross-correlation output files with the same extension. The file 'test#.grid' may or may
not be missing.
The files 'test#.rebin' specifies the tests to be applied to the group. It must contain
the following:
- 1st line: Binning in parallel and perpendicular separations. The format is to have 'pi'
followed by the limits on the parallel separation, then have 'sigma' followed
by the limits on the perpendicular separation. Separator between elements must
be a single white space. Example:
'pi -1.0 0.0 1.0 sigma 0.0 1.0 2.0'
- rest of the lines: Name of the test, and the binning in parallel and perpendicular
separation the data is to be rebinned to. The format is the same as
the first line, preceded by the name of the test and a white space.
The name of the test must be 'RI#' to test the function
rebinIgnoringCovMat and 'R#' to test the function CorrData.rebinData.
Examples:
'RI1 pi -1.0 1.0 sigma 0.0 2.0' - to test rebinIgnoringCovMat
'R1 pi -1.0 1.0 sigma 0.0 2.0' - to test CorrData.rebinData
The file 'test#.sol' must contain the results of the proposed tests in the same order.
The results of each test are a line starting with the name of the test, the resulting
data matrix, and the resulting covariance matrix (the last one only if function
CorrData.rebinData is being tested). The format is to put the name followed by a white
space, then 'data' followed by the data matrix, then 'cov' followed by the covariance
matrix elements printed by columns. Numbers must contain 4 decimal digits. Keep in mind
that lines must not end with a white space, and that no new line must be found at end of
file. Examples:
'RI2 data 52.5000 22.5000'
'R2 data 30.1449 50.0489 cov 2.0017 -0.0450 -0.0450 1.5010'
The function will write a 'test#.res' file with the result. If this file is identical
to 'test#.sol', all the tests will have passed. Line-to-line differences show the tests
that have not passed. Note that the function will consider only 4 decimal places in the
comparison with the provided solution.
ARGUMENTS:
num_test_groups (int): Number of test groups to analyze. Must be at least 1.
RETURNS: Nothing. The outcome of each test group is printed to the standard output.
EXAMPLES:
testRebinning(3)
"""
# check parameters' type
if not (type(num_test_groups) == int):
raise CorrelationProcessError(testRebinning, 'Incorrect type of the parameter "num_test_groups".')
print '\n\nTesting the functions CorrData.rebinData and rebinIgnoringCovMat.'
print 'There are {} test groups specified\n'.format(num_test_groups)
for test_group in range(1, num_test_groups + 1):
# load binning information from *.rebin file
try:
lines = open('test_rebinning/test{}.rebin'.format(test_group)).readlines()
except IOError:
raise CorrelationProcessError(testRebinning, 'missing test{}.rebin'.format(test_group))
cols = lines[0].split()
cols_index = 0
if cols[cols_index] == 'pi':
pi = []
cols_index += 1
else:
raise CorrelationProcessError(testRebinning, 'test{}.rebin is not properly structured'.format(test_group))
try:
while cols[cols_index] != 'sigma':
pi.append(float(cols[cols_index]))
cols_index += 1
except IndexError:
raise CorrelationProcessError(testRebinning, 'test{}.rebin is not properly structured'.format(test_group))
sigma = []
cols_index += 1
while cols_index < len(cols):
sigma.append(float(cols[cols_index]))
cols_index += 1
pi = np.array(pi)
sigma = np.array(sigma)
# load data
test_data = CorrData('test_rebinning/test{}'.format(test_group), pi, sigma)
# perform tests
tests = []
for line in lines[1:]:
cols = line.split()
try:
assert cols[0].startswith('R') and (cols[0][1] == 'I' or cols[0][1].isdigit())
except AssertionError:
raise CorrelationProcessError(testRebinning, 'test{}.rebin is not properly structured'.format(test_group))
cols_index = 1
if cols[cols_index] == 'pi':
pi = []
cols_index += 1
else:
raise CorrelationProcessError(testRebinning, 'test{}.rebin is not properly structured'.format(test_group))
try:
while cols[cols_index] != 'sigma':
pi.append(float(cols[cols_index]))
cols_index += 1
except IndexError:
raise CorrelationProcessError(testRebinning, 'test{}.rebin is not properly structured'.format(test_group))
sigma = []
cols_index += 1
while cols_index < len(cols):
sigma.append(float(cols[cols_index]))
cols_index += 1
pi = np.array(pi)
sigma = np.array(sigma)
tests.append((cols[0], np.copy(pi), np.copy(sigma)))
# save results to disk
results = []
for name, pi, sigma in tests:
if name[1].isdigit():
transf_matrix = test_data.computeTransformationMatrix(pi, sigma)
cov_mat_new, data_mat_new, _, _, _ = test_data.rebinData(transf_matrix)
result = '{} data'.format(name)
for item in data_mat_new:
result += ' {:.4f}'.format(item)
result += ' cov'
for items in cov_mat_new:
for item in items:
result += ' {:.4f}'.format(item)
results.append(result)
else:
data_mat_new = rebinIgnoringCovMat(test_data, pi, sigma)
result = '{} data'.format(name)
for item in data_mat_new:
result += ' {:.4f}'.format(item)
results.append(result)
output_file = open('test_rebinning/test{}.res'.format(test_group), 'w')
output_file.write('\n'.join(results))
output_file.close()
# compare the results against the provided solution
try:
lines_solution = open('test_rebinning/test{}.sol'.format(test_group)).readlines()
except IOError:
raise CorrelationProcessError(testRebinning, 'missing test{}.sol'.format(test_group))
lines_results = open('test_rebinning/test{}.res'.format(test_group)).readlines()
diff = [ line for line in difflib.unified_diff(lines_solution, lines_results, fromfile='test{}.sol'.format(test_group), tofile='test{}.res'.format(test_group), lineterm='') ]
if len(diff) == 0:
print 'Test group {} status: PASSED'.format(test_group)
else:
print 'Test group {} status: FAILED\nDifferences:'.format(test_group)
for line in diff:
if line.startswith('+') or line.startswith('-'):
print line
# what to do if the module is not imported but directly executed
if __name__ == '__main__':
testRebinning(6)
|
mit
|
asimshankar/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
|
10
|
70021
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column_lib as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
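  # Both expected losses above follow from the model converging to the
  # constant prediction p = P(y=1|x) = 0.25, since the single feature is the
  # same for every row:
  #   unweighted cross entropy: -0.25*log(0.25) - 0.75*log(0.75)  ~= 0.562
  #   weighted cross entropy:   (-7*log(0.25) - 3*log(0.75)) / 10 ~= 1.06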
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
global_step = classifier.get_variable_value('global_step')
if global_step == 100:
# Expected is 100, but because of the global step increment bug, is 50.
# Occasionally, step increments one more time due to a race condition,
# reaching 51 steps.
self.assertIn(step_counter.steps, [50, 51])
else:
# Occasionally, training stops when global_step == 102, due to a race
# condition. In addition, occasionally step increments one more time due
# to a race condition reaching 52 steps.
self.assertIn(step_counter.steps, [51, 52])
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
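  # The expected losses above follow from the regressor converging to the
  # constant prediction y_hat = 0.25 (the label mean), giving squared errors
  # of 0.75^2 on the y=1 row and 0.25^2 on each y=0 row:
  #   unweighted: (0.75^2 + 3*0.25^2) / 4    = 0.1875
  #   weighted:   (7*0.75^2 + 3*0.25^2) / 10 = 0.4125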
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=110)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=110)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
mwv/scikit-learn
|
examples/svm/plot_svm_scale_c.py
|
223
|
5375
|
"""
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
    C \sum_{i=1}^{n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>` to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under a given hypothesis the estimator, once
learned, predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1` penalty. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation error correlates best with
the test error when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
    behind this is that the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
|
bsd-3-clause
|
eladnoor/small-molecule-regulation
|
python/plot_figures_for_poster.py
|
1
|
3222
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 9 17:37:42 2016
@author: noore
"""
from plot_figures_for_paper import FigurePlotter, abs_eps_x_v, CONDITIONS
import matplotlib.pyplot as plt
import settings
import numpy as np
import seaborn as sns
sns.set(style='white')
if __name__ == "__main__":
fp = FigurePlotter()
fp.plot_figS5()
ki = fp.ki
ki = ki[ki['growth condition'].isin(CONDITIONS)]
#%%
fig, axs = plt.subplots(1, 1, figsize=(3, 3))
ax = axs
s_range = np.logspace(-3, 3, 1000) # 10 uM - 100 mM
interaction = 'pep:2.7.1.11'
ax.plot(s_range, list(map(abs_eps_x_v, s_range)), color='k', alpha=0.5,
zorder=1)
ax.set_title('inhibitors', fontsize=12)
ax.set_xscale('log')
ax.set_xlim(1e-3, 1e3)
ax.set_ylim(-1e-2, 1+1e-2)
met, ec = interaction.split(':')
ax.set_title('Inhibition of %s on %s' % (met.upper(), ec))
    sat_vs_elast = ki.loc[ki['met:EC'] == interaction].copy()
sat_vs_elast['abs(elasticity)'] = sat_vs_elast['elasticity'].abs()
sat_vs_elast['I_over_KI'] = sat_vs_elast['concentration']/sat_vs_elast['KI_Value']
sat_vs_elast.plot.scatter(x='I_over_KI', y='abs(elasticity)', ax=ax, alpha=1,
zorder=2)
for cond, row in sat_vs_elast.iterrows():
ax.text(1.2*row['I_over_KI'], row['abs(elasticity)'],
row['growth condition'], ha='left', va='center', fontsize=8)
ax.set_xlabel('inhibitor saturation $I/K_I$', fontsize=12)
    ax.set_ylabel(r'$|\epsilon_v^x|$')
settings.savefig(fig, 'figP1')
# km_pivoted.index = km_pivoted.index.str.upper()
# ki_pivoted.index = ki_pivoted.index.str.upper()
#
# # keep and reorder only the conditions that were pre-selected
# km_pivoted = km_pivoted.loc[:, CONDITIONS]
# ki_pivoted = ki_pivoted.loc[:, CONDITIONS]
#
# fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(18, 30))
# sns.heatmap(km_pivoted, ax=ax0, mask=km_pivoted.isnull(),
# cbar=False, vmin=-1, vmax=1, cmap=settings.HEATMAP_COLORMAP, fmt='.2f')
# ax0.set_xticklabels(list(km_pivoted.columns), fontsize=12, rotation=90)
# ax0.set_yticklabels(reversed(km_pivoted.index), rotation=0, fontsize=6)
# ax0.set_title('substrates', fontsize=20)
# ax0.set_xlabel('growth condition', fontsize=16)
# ax0.set_ylabel('')
#
# clb1 = matplotlib.colorbar.make_axes(ax1)
# sns.heatmap(ki_pivoted, ax=ax1, mask=ki_pivoted.isnull(),
# cbar=True, vmin=-1, vmax=1, annot=True, cmap=settings.HEATMAP_COLORMAP,
# cbar_ax=clb1[0], fmt='.2f')
# ax1.set_xticklabels(list(ki_pivoted.columns), fontsize=12, rotation=90)
# ax1.set_title('inhibitors', fontsize=20)
# ax1.set_yticklabels(reversed(ki_pivoted.index),
# rotation=0, fontsize=10)
# ax1.set_xlabel('growth condition', fontsize=16)
# ax1.set_ylabel('')
# clb1[0].set_ylabel('elasticity', fontsize=16)
#
# settings.savefig(fig, 'figS5')
# km_pivoted.to_csv(os.path.join(settings.RESULT_DIR,
# 'km_elasticity_full.csv'))
# ki_pivoted.to_csv(os.path.join(settings.RESULT_DIR,
# 'ki_elasticity_full.csv'))
|
mit
|
wazeerzulfikar/scikit-learn
|
sklearn/manifold/tests/test_t_sne.py
|
3
|
29629
|
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.neighbors import NearestNeighbors
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
x = np.linspace(0, 1, 10)
xx, yy = np.meshgrid(x, x)
X_2d_grid = np.hstack([
xx.ravel().reshape(-1, 1),
yy.ravel().reshape(-1, 1),
])
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
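    # ObjectiveSmallGradient reports a slowly decreasing error with a tiny
    # gradient (norm 1e-5), so the first run below should stop on
    # min_grad_norm; flat_function never improves, so it should stop on
    # n_iter_without_progress.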
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
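    # The perplexity of each conditional distribution P[i] is exp(H(P[i])),
    # the exponential of its Shannon entropy (natural logarithm here).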
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, 1:k].astype(np.int64)
distances_nn = np.array([distances[k, neighbors_nn[k]]
for k in range(n_samples)])
P2 = _binary_search_perplexity(distances_nn, neighbors_nn,
desired_perplexity, verbose=0)
P_nn = np.array([P1[k, neighbors_nn[k]] for k in range(n_samples)])
assert_array_almost_equal(P_nn, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 5):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
distances_nn = np.array([distances[k, neighbors_nn[k]]
for k in range(n_samples)])
P2k = _binary_search_perplexity(distances_nn, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
idx = np.argsort(P2k.ravel())[::-1]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
# Convert the sparse matrix to a dense one for testing
P1 = P1.toarray()
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(50, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
t = trustworthiness(X, X_embedded, n_neighbors=1)
assert_greater(t, 0.9)
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [250, 300, 350]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
for i in range(3):
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
early_exaggeration=2.0, metric="precomputed",
random_state=i, verbose=0)
X_embedded = tsne.fit_transform(D)
t = trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True)
assert t > .95
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_non_positive_precomputed_distances():
# Precomputed distance matrices must be positive.
bad_dist = np.array([[0., -1.], [1., 0.]])
for method in ['barnes_hut', 'exact']:
tsne = TSNE(metric="precomputed", method=method)
assert_raises_regexp(ValueError, "All distances .*precomputed.*",
tsne.fit_transform, bad_dist)
def test_non_positive_computed_distances():
# Computed distance matrices must be positive.
def metric(x, y):
return -1
tsne = TSNE(metric=metric, method='exact')
X = np.array([[0.0, 0.0], [1.0, 1.0]])
assert_raises_regexp(ValueError, "All distances .*metric given.*",
tsne.fit_transform, X)
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
tsne = TSNE(init="not available")
m = "'init' must be 'pca', 'random', or a numpy array"
assert_raises_regexp(ValueError, m, tsne.fit_transform,
np.array([[0.0], [1.0]]))
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available", method='exact')
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
tsne = TSNE(metric="not available", method='barnes_hut')
assert_raises_regexp(ValueError, "Metric 'not available' not valid.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_method_not_available():
    # 'method' must be 'barnes_hut' or 'exact'
tsne = TSNE(method='not available')
assert_raises_regexp(ValueError, "'method' must be 'barnes_hut' or ",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_angle_out_of_range_checks():
# check the angle parameter range
for angle in [-1, -1e-6, 1 + 1e-6, 2]:
tsne = TSNE(angle=angle)
assert_raises_regexp(ValueError, "'angle' must be between 0.0 - 1.0",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_n_components_range():
# barnes_hut method should only be used with n_components <= 3
tsne = TSNE(n_components=4, method="barnes_hut")
assert_raises_regexp(ValueError, "'n_components' should be .*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_early_exaggeration_used():
# check that the ``early_exaggeration`` parameter has an effect
random_state = check_random_state(0)
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(25, n_components).astype(np.float32)
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=1,
learning_rate=100.0, init="pca", random_state=0,
method=method, early_exaggeration=1.0)
X_embedded1 = tsne.fit_transform(X)
tsne = TSNE(n_components=n_components, perplexity=1,
learning_rate=100.0, init="pca", random_state=0,
method=method, early_exaggeration=10.0)
X_embedded2 = tsne.fit_transform(X)
assert not np.allclose(X_embedded1, X_embedded2)
def test_n_iter_used():
# check that the ``n_iter`` parameter has an effect
random_state = check_random_state(0)
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(25, n_components).astype(np.float32)
for method in methods:
for n_iter in [251, 500]:
tsne = TSNE(n_components=n_components, perplexity=1,
learning_rate=0.5, init="random", random_state=0,
method=method, early_exaggeration=1.0, n_iter=n_iter)
tsne.fit_transform(X)
assert tsne.n_iter_ == n_iter - 1
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
    # skip_num_points should make it such that the Barnes-Hut gradient
    # is not calculated for indices below skip_num_points.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
from scipy.sparse import csr_matrix
P = csr_matrix(pij_input)
neighbors = P.indices.astype(np.int64)
indptr = P.indptr.astype(np.int64)
    _barnes_hut_tsne.gradient(P.data, pos_output, neighbors, indptr,
                              grad_bh, 0.5, 2, 1,
                              skip_num_points=skip_num_points)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("nearest neighbors..." in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("early exaggeration" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(50, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method, verbose=0)
X_embedded = tsne.fit_transform(X)
effective_type = X_embedded.dtype
            # tsne cython code is only single precision, so the output will
            # always be single precision, irrespective of the input dtype
assert effective_type == np.float32
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, verbose=0)
kl_exact, grad_exact = _kl_divergence(params, P, degrees_of_freedom,
n_samples, n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
distances_nn = np.array([distances[i, neighbors_nn[i]]
for i in range(n_samples)])
assert np.all(distances[0, neighbors_nn[0]] == distances_nn[0]),\
abs(distances[0, neighbors_nn[0]] - distances_nn[0])
P_bh = _joint_probabilities_nn(distances_nn, neighbors_nn,
perplexity, verbose=0)
kl_bh, grad_bh = _kl_divergence_bh(params, P_bh, degrees_of_freedom,
n_samples, n_components,
angle=angle, skip_num_points=0,
verbose=0)
P = squareform(P)
P_bh = P_bh.toarray()
assert_array_almost_equal(P_bh, P, decimal=5)
assert_almost_equal(kl_exact, kl_bh, decimal=3)
@skip_if_32bit
def test_n_iter_without_progress():
# Use a dummy negative n_iter_without_progress and check output on stdout
random_state = check_random_state(0)
X = random_state.randn(100, 10)
for method in ["barnes_hut", "exact"]:
tsne = TSNE(n_iter_without_progress=-1, verbose=2, learning_rate=1e8,
random_state=0, method=method, n_iter=351, init="random")
tsne._N_ITER_CHECK = 1
tsne._EXPLORATION_N_ITER = 0
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert_in("did not make any progress during the "
"last -1 episodes. Finished.", out)
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split('\n')
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
        # When the computation is finished, the last gradient norm value is
        # just repeated; we do not need to store it
if 'Finished' in line:
break
start_grad_norm = line.find('gradient norm')
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace('gradient norm = ', '').split(' ')[0]
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = \
len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
    # The gradient norm can be smaller than min_grad_norm at most once,
    # because the moment it becomes smaller, the optimization stops
assert_less_equal(n_smaller_gradient_norms, 1)
def test_accessible_kl_divergence():
# Ensures that the accessible kl_divergence matches the computed value
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=2, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the accessible kl_divergence as the error at
# the last iteration
for line in out.split('\n')[::-1]:
if 'Iteration' in line:
_, _, error = line.partition('error = ')
if error:
error, _, _ = error.partition(',')
break
assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
def check_uniform_grid(method, seeds=[0, 1, 2], n_iter=1000):
"""Make sure that TSNE can approximately recover a uniform 2D grid"""
for seed in seeds:
tsne = TSNE(n_components=2, init='random', random_state=seed,
perplexity=10, n_iter=n_iter, method=method)
Y = tsne.fit_transform(X_2d_grid)
# Ensure that the convergence criterion has been triggered
assert tsne.n_iter_ < n_iter
# Ensure that the resulting embedding leads to approximately
# uniformly spaced points: the distance to the closest neighbors
# should be non-zero and approximately constant.
nn = NearestNeighbors(n_neighbors=1).fit(Y)
dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
assert dist_to_nn.min() > 0.1
smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
try_name = "{}_{}".format(method, seed)
assert_greater(smallest_to_mean, .5, msg=try_name)
assert_less(largest_to_mean, 2, msg=try_name)
def test_uniform_grid():
for method in ['barnes_hut', 'exact']:
yield check_uniform_grid, method
def test_bh_match_exact():
    # check that the ``barnes_hut`` method matches the exact one when
    # ``angle = 0`` and ``perplexity > n_samples / 3``
random_state = check_random_state(0)
n_features = 10
X = random_state.randn(30, n_features).astype(np.float32)
X_embeddeds = {}
n_iter = {}
for method in ['exact', 'barnes_hut']:
tsne = TSNE(n_components=2, method=method, learning_rate=1.0,
init="random", random_state=0, n_iter=251,
perplexity=30.0, angle=0)
# Kill the early_exaggeration
tsne._EXPLORATION_N_ITER = 0
X_embeddeds[method] = tsne.fit_transform(X)
n_iter[method] = tsne.n_iter_
assert n_iter['exact'] == n_iter['barnes_hut']
assert_array_almost_equal(X_embeddeds['exact'], X_embeddeds['barnes_hut'],
decimal=3)
|
bsd-3-clause
|
shoyer/xray
|
doc/gallery/plot_cartopy_facetgrid.py
|
3
|
1379
|
# -*- coding: utf-8 -*-
"""
==================================
Multiple plots and map projections
==================================
Control the map projection parameters on multiple axes
This example illustrates how to plot multiple maps and control their extent
and aspect ratio.
For more details see `this discussion`_ on github.
.. _this discussion: https://github.com/pydata/xarray/issues/1397#issuecomment-299190567
""" # noqa
from __future__ import division
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
# Load the data
ds = xr.tutorial.load_dataset('air_temperature')
air = ds.air.isel(time=[0, 724]) - 273.15
# This is the map projection we want to plot *onto*
map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)
p = air.plot(transform=ccrs.PlateCarree(), # the data's projection
col='time', col_wrap=1, # multiplot settings
aspect=ds.dims['lon'] / ds.dims['lat'], # for a sensible figsize
subplot_kws={'projection': map_proj}) # the plot's projection
# We have to set the map's options on all axes in this figure
for ax in p.axes.flat:
ax.coastlines()
ax.set_extent([-160, -30, 5, 75])
    # Without this aspect attribute, the maps will look chaotic and the
# "extent" attribute above will be ignored
ax.set_aspect('equal', 'box-forced')
plt.show()
|
apache-2.0
|
HolgerPeters/scikit-learn
|
sklearn/tests/test_metaestimators.py
|
52
|
4990
|
"""Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
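    # The `hides` helper below wraps a SubEstimator method in a property that
    # raises AttributeError when the instance was constructed with
    # hidden_method set to that method's name, simulating a sub-estimator
    # that lacks the method, so delegation can be tested in both directions.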
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
|
bsd-3-clause
|
polyanskiy/refractiveindex.info-scripts
|
scripts/Mathar 2007 - Air 4.35-5.2um.py
|
1
|
2561
|
# -*- coding: utf-8 -*-
# Author: Mikhail Polyanskiy
# Last modified: 2017-11-17
# Original data: Mathar 2007, https://doi.org/10.1088/1464-4258/9/5/008
############################### 4.35-5.2 μm ###################################
import numpy as np
import matplotlib.pyplot as plt
π = np.pi
# adjustable parameters
T = 273.15+15 # Temperature: K
p = 101325 # Pressure: Pa
H = 0 # Humidity: 0-100%
# model parameters
cref = [ 0.200020e-3, 0.275346e-9, 0.325702e-12, -0.693603e-14, 0.285610e-17, 0.338758e-18] # cm^j
cT = [ 0.590035e-1, -0.375764e-6, 0.134585e-9, 0.124316e-11, 0.508510e-13, -0.189245e-15] # cm^j · K
cTT = [-4.09830, 0.250037e-2, 0.275187e-6, -0.653398e-8, -0.310589e-9, 0.127747e-11] # cm^j · K^2
cH = [-0.140463e-7, 0.839350e-11, -0.190929e-14, -0.121399e-16, -0.898863e-18, 0.364662e-20] # cm^j · %^-1
cHH = [ 0.543605e-12, 0.112802e-15, -0.229979e-19, -0.191450e-21, -0.120352e-22, 0.500955e-25] # cm^j · %^-2
cp = [ 0.266898e-8, 0.273629e-14, 0.463466e-17, -0.916894e-23, 0.136685e-21, 0.413687e-23] # cm^j · Pa^-1
cpp = [ 0.610706e-17, 0.116620e-21, 0.244736e-24, -0.497682e-26, 0.742024e-29, 0.224625e-30] # cm^j · Pa^-2
cTH = [ 0.674488e-4, -0.406775e-7, 0.289063e-11, 0.819898e-13, 0.468386e-14, -0.191182e-16] # cm^j · K · %^-1
cTp = [ 0.778627e-6, 0.593296e-12, 0.145042e-14, 0.489815e-17, 0.327941e-19, 0.128020e-21] # cm^j · K · Pa^-1
cHp = [-0.211676e-15, 0.487921e-20, -0.682545e-23, 0.942802e-25, -0.946422e-27, -0.153682e-29] # cm^j · %^-1 · Pa^-1
σref = 1e4/4.8 # cm^−1
Tref = 273.15+17.5 # K
pref = 75000 # Pa
Href = 10 #%
# model
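# The refractivity n - 1 is a power series in the wavenumber offset (σ - σref);
# each coefficient is itself expanded to second order around the reference
# temperature, humidity and pressure, plus the mixed first-order terms
# (following Mathar 2007):
#
#   n - 1 = Σ_j [ cref_j
#                 + cT_j (1/T - 1/Tref) + cTT_j (1/T - 1/Tref)²
#                 + cH_j (H - Href)     + cHH_j (H - Href)²
#                 + cp_j (p - pref)     + cpp_j (p - pref)²
#                 + cTH_j (1/T - 1/Tref)(H - Href)
#                 + cTp_j (1/T - 1/Tref)(p - pref)
#                 + cHp_j (H - Href)(p - pref) ] · (σ - σref)^j ,   j = 0…5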
def n(λ):
σ = 1e4/λ # cm^-1
n = 1
for j in range(0, 6):
n += ( cref[j] + cT[j]*(1/T-1/Tref) + cTT[j]*(1/T-1/Tref)**2
+ cH[j]*(H-Href) + cHH[j]*(H-Href)**2
+ cp[j]*(p-pref) + cpp[j]*(p-pref)**2
+ cTH[j]*(1/T-1/Tref)*(H-Href)
+ cTp[j]*(1/T-1/Tref)*(p-pref)
+ cHp[j]*(H-Href)*(p-pref) ) * (σ-σref)**j
return n
# output - modify code below the line to match your needs
###############################################################################
λ = np.arange(4.35, 5.201, 0.01)
n = n(λ)
# write data file
file = open('out.txt', 'w')
for i in range(0, len(λ)):
file.write('\n {:.2f} {:.12f}'.format(λ[i],n[i]))
file.close()
#plot n vs μm
plt.rc('font', family='Arial', size='14')
plt.figure()
plt.plot(λ, n-1)
plt.xlabel('Wavelength (μm)')
plt.ylabel('n-1')
|
gpl-3.0
|
bees4ever/spotpy
|
tests/test_analyser.py
|
1
|
16451
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2018 by Benjamin Manns
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska, Benjamin Manns
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import spotpy
import unittest
import numpy as np
import spotpy.analyser
import os
import sys
from spotpy.examples.spot_setup_rosenbrock import spot_setup as rosenbrock_setup
from spotpy.examples.spot_setup_griewank import spot_setup as griewank_setup
from spotpy.examples.spot_setup_hymod_python import spot_setup as hymod_setup
class TestAnalyser(unittest.TestCase):
@classmethod
def setUpClass(self):
np.random.seed(42)
self.rep = 300
self.parallel = "seq"
self.dbformat = "ram"
self.timeout = 5
self.fig_name = 'test_output.png'
sampler = spotpy.algorithms.mc(rosenbrock_setup(),
sim_timeout=self.timeout)
sampler.sample(self.rep)
self.results = sampler.getdata()
sampler = spotpy.algorithms.mc(griewank_setup(),
sim_timeout=self.timeout)
sampler.sample(self.rep)
self.griewank_results = sampler.getdata()
        if sys.version_info >= (3, 6):
            # FAST is only fully operational under Python 3
sampler = spotpy.algorithms.fast(rosenbrock_setup(),
sim_timeout=self.timeout)
sampler.sample(self.rep)
self.sens_results = sampler.getdata()
            # Hymod results are empty with Python <3.6
sampler = spotpy.algorithms.dream(hymod_setup(_used_algorithm='dream'),
sim_timeout=self.timeout)
self.r_hat = sampler.sample(self.rep)
self.hymod_results = sampler.getdata()
def test_setUp(self):
self.assertEqual(len(self.results), self.rep)
self.assertEqual(len(self.griewank_results), self.rep)
if sys.version_info >= (3, 6):
self.assertEqual(len(self.hymod_results), self.rep)
self.assertEqual(len(self.sens_results), self.rep)
def test_get_parameters(self):
self.assertEqual(len(spotpy.analyser.get_parameters(
self.results)[0]), 3)
def test_get_parameternames(self):
self.assertEqual(spotpy.analyser.get_parameternames(
self.results
),['x', 'y', 'z'])
def test_get_parameter_fields(self):
self.assertEqual(len(spotpy.analyser.get_parameternames(
self.results
)), 3)
def test_get_minlikeindex(self):
minlikeindex = spotpy.analyser.get_minlikeindex(
self.results
)
self.assertEqual(len(minlikeindex), 2)
self.assertEqual(type(minlikeindex),type((1,1)))
def test_get_maxlikeindex(self):
get_maxlikeindex = spotpy.analyser.get_maxlikeindex(
self.results
)
self.assertEqual(len(get_maxlikeindex), 2)
self.assertEqual(type(get_maxlikeindex),type((1,1)))
def test_get_like_fields(self):
get_like_fields = spotpy.analyser.get_like_fields(
self.results
)
self.assertEqual(len(get_like_fields), 1)
self.assertEqual(type(get_like_fields),type([]))
def test_calc_like(self):
calc_like = spotpy.analyser.calc_like(
self.results,
rosenbrock_setup().evaluation(),spotpy.objectivefunctions.rmse)
self.assertEqual(len(calc_like), len(self.results))
self.assertEqual(type(calc_like), type([]))
def test_get_best_parameterset(self):
get_best_parameterset_true = spotpy.analyser.get_best_parameterset(
self.results,True)
get_best_parameterset_false = spotpy.analyser.get_best_parameterset(
self.results, False)
self.assertEqual(len(get_best_parameterset_true[0]), 3)
self.assertEqual(type(get_best_parameterset_true[0]), np.void)
self.assertEqual(len(get_best_parameterset_false[0]), 3)
self.assertEqual(type(get_best_parameterset_false[0]), np.void)
def test_get_modelruns(self):
get_modelruns = spotpy.analyser.get_modelruns(
self.results
)
self.assertEqual(len(get_modelruns[0]), 1)
self.assertEqual(type(get_modelruns[0]), np.void)
def test_get_header(self):
get_header = spotpy.analyser.get_header(
self.results
)
self.assertEqual(len(get_header), 6)
self.assertEqual(type(get_header), type(()))
def test_get_min_max(self):
get_min_max = spotpy.analyser.get_min_max(spotpy_setup=rosenbrock_setup())
self.assertEqual(len(get_min_max[0]), 3)
self.assertEqual(type(get_min_max), type(()))
def test_get_parbounds(self):
get_parbounds = spotpy.analyser.get_parbounds(spotpy_setup=rosenbrock_setup())
self.assertEqual(len(get_parbounds[0]), 2)
self.assertEqual(len(get_parbounds), 3)
self.assertEqual(type(get_parbounds), type([]))
def test_get_percentiles(self):
get_percentiles = spotpy.analyser.get_percentiles(
self.results)
self.assertEqual(len(get_percentiles),5)
self.assertEqual(type(get_percentiles[0][0]), type(np.abs(-1.0)))
self.assertEqual(type(get_percentiles),type(()))
def test__geweke(self):
sample1 = []
for a in self.results:
sample1.append(a[0])
_geweke = spotpy.analyser._Geweke(sample1)
self.assertEqual(len(_geweke), 20)
self.assertEqual(type(_geweke), type(np.array([])))
def test_plot_Geweke(self):
sample1 = []
for a in self.results:
sample1.append(a[0])
spotpy.analyser.plot_Geweke(sample1,"sample1")
plt.savefig(self.fig_name)
        # approximately 8855 bytes is the size of an empty matplotlib figure, so
        # we expect a plot with some content; we only check the file size, not
        # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_get_sensitivity_of_fast(self):
if sys.version_info >= (3, 6):
get_sensitivity_of_fast = spotpy.analyser.get_sensitivity_of_fast(self.sens_results)
self.assertEqual(len(get_sensitivity_of_fast), 2)
self.assertEqual(len(get_sensitivity_of_fast["S1"]), 3)
self.assertEqual(len(get_sensitivity_of_fast["ST"]), 3)
self.assertEqual(type(get_sensitivity_of_fast), type({}))
def test_get_simulation_fields(self):
get_simulation_fields = spotpy.analyser.get_simulation_fields(
self.results)
self.assertEqual(['simulation_0'],get_simulation_fields)
def test_compare_different_objectivefunctions(self):
sampler = spotpy.algorithms.lhs(rosenbrock_setup(),
sim_timeout=self.timeout)
sampler.sample(self.rep)
compare_different_objectivefunctions = spotpy.analyser.compare_different_objectivefunctions(
sampler.getdata()['like1'], self.results['like1'])
self.assertEqual(type(compare_different_objectivefunctions[1]),type(np.array([0.5])[0]))
def test_plot_parameter_uncertainty(self):
if sys.version_info >= (3, 6):
posterior = spotpy.analyser.get_posterior(self.hymod_results,percentage=10)
            # assertAlmostEqual tests decimal-place accuracy, therefore we divide both by 100
self.assertAlmostEqual(len(posterior)/100, self.rep*0.001, 1)
self.assertEqual(type(posterior), type(np.array([])))
spotpy.analyser.plot_parameter_uncertainty(posterior,
hymod_setup().evaluation(),
fig_name=self.fig_name)
            # approximately 8855 bytes is the size of an empty matplotlib figure, so
            # we expect a plot with some content; we only check the file size, not
            # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_fast_sensitivity(self):
if sys.version_info >= (3, 6):
spotpy.analyser.plot_fast_sensitivity(self.sens_results,fig_name=self.fig_name)
            # approximately 8855 bytes is the size of an empty matplotlib figure, so
            # we expect a plot with some content; we only check the file size, not
            # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_heatmap_griewank(self):
spotpy.analyser.plot_heatmap_griewank([self.griewank_results],["test"],
fig_name=self.fig_name)
        # approximately 8855 bytes is the size of an empty matplotlib figure, so
        # we expect a plot with some content; we only check the file size, not
        # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_objectivefunction(self):
spotpy.analyser.plot_objectivefunction(self.results,
rosenbrock_setup().evaluation(),
fig_name=self.fig_name)
        # approximately 8855 bytes is the size of an empty matplotlib figure, so
        # we expect a plot with some content; we only check the file size, not
        # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_parametertrace_algorithms(self):
spotpy.analyser.plot_parametertrace_algorithms([self.griewank_results],
["test_plot_parametertrace_algorithms"],
spot_setup=griewank_setup(),
fig_name=self.fig_name)
        # approximately 8855 bytes is the size of an empty matplotlib figure, so
        # we expect a plot with some content; we only check the file size, not
        # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
os.remove(self.fig_name)
def test_plot_parametertrace(self):
spotpy.analyser.plot_parametertrace(self.griewank_results, ["0","1"], fig_name=self.fig_name)
        # approximately 8855 bytes is the size of an empty matplotlib figure, so
        # we expect a plot with some content; we only check the file size, not
        # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_posterior_parametertrace(self):
spotpy.analyser.plot_posterior_parametertrace(self.griewank_results, ["0","1"], fig_name=self.fig_name)
        # approximately 8855 bytes is the size of an empty matplotlib figure, so
        # we expect a plot with some content; we only check the file size, not
        # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_posterior(self):
if sys.version_info >= (3, 6):
spotpy.analyser.plot_posterior(self.hymod_results
, hymod_setup().evaluation(),fig_name=self.fig_name)
            # approximately 8855 bytes is the size of an empty matplotlib figure, so
            # we expect a plot with some content; we only check the file size, not
            # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_bestmodelrun(self):
spotpy.analyser.plot_bestmodelrun(self.griewank_results,
griewank_setup().evaluation(), fig_name=self.fig_name)
        # approximately 8855 bytes is the size of an empty matplotlib figure, so
        # we expect a plot with some content; we only check the file size, not
        # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
os.remove(self.fig_name)
def test_plot_bestmodelruns(self):
if sys.version_info >= (3, 6):
spotpy.analyser.plot_bestmodelruns(
[self.hymod_results[0:10],self.hymod_results[10:20]], hymod_setup().evaluation(),
dates=range(1, 1+len(hymod_setup().evaluation())), algorithms=["test", "test2"],
fig_name=self.fig_name)
            # approximately 8855 bytes is the size of an empty matplotlib figure, so
            # we expect a plot with some content; we only check the file size, not
            # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_objectivefunctiontraces(self):
spotpy.analyser.plot_objectivefunctiontraces([self.results],
[rosenbrock_setup().evaluation()],
["test"],fig_name=self.fig_name)
        # approximately 8855 bytes is the size of an empty matplotlib figure, so
        # we expect a plot with some content; we only check the file size, not
        # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_regression(self):
spotpy.analyser.plot_regression(self.results, rosenbrock_setup().evaluation(),
fig_name=self.fig_name)
        # approximately 8855 bytes is the size of an empty matplotlib figure, so
        # we expect a plot with some content; we only check the file size, not
        # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_parameterInteraction(self):
        # Test only under Python 3.6, as lower versions result in a strange ValueError
if sys.version_info >= (3, 6):
spotpy.analyser.plot_parameterInteraction(self.results,
fig_name = self.fig_name)
            # approximately 8855 bytes is the size of an empty matplotlib figure, so
            # we expect a plot with some content; we only check the file size, not
            # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_allmodelruns(self):
if sys.version_info >= (3, 6):
modelruns = []
for run in self.hymod_results:
on_run = []
for i in run:
on_run.append(i)
on_run = np.array(on_run)[:-7]
modelruns.append(on_run.tolist())
spotpy.analyser.plot_allmodelruns(modelruns, hymod_setup().evaluation(),
dates=range(1, len(hymod_setup().evaluation()) + 1),
fig_name=self.fig_name)
            # approximately 8855 bytes is the size of an empty matplotlib figure, so
            # we expect a plot with some content; we only check the file size, not
            # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_autocorellation(self):
spotpy.analyser.plot_autocorellation(self.results["parx"],"parx", fig_name=self.fig_name)
        # approximately 8855 bytes is the size of an empty matplotlib figure, so
        # we expect a plot with some content; we only check the file size, not
        # the structure of the plot
self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
def test_plot_gelman_rubin(self):
if sys.version_info >= (3, 6):
spotpy.analyser.plot_gelman_rubin(self.r_hat, fig_name =self.fig_name)
self.assertGreaterEqual(abs(os.path.getsize(self.fig_name)), 100)
@classmethod
def tearDownClass(self):
os.remove('test_output.png')
if __name__ == '__main__':
unittest.main(exit=False)
|
mit
|
dask-image/dask-ndfourier
|
dask_ndfourier/__init__.py
|
1
|
6264
|
# -*- coding: utf-8 -*-
__author__ = """John Kirkham"""
__email__ = "[email protected]"
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
import numbers
import dask.array
from dask_ndfourier import _compat
from dask_ndfourier import _utils
def fourier_gaussian(input, sigma, n=-1, axis=-1):
"""
Multi-dimensional Gaussian fourier filter.
The array is multiplied with the fourier transform of a Gaussian
kernel.
Parameters
----------
input : array_like
The input array.
sigma : float or sequence
The sigma of the Gaussian kernel. If a float, `sigma` is the same for
all axes. If a sequence, `sigma` has to contain one value for each
axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
Returns
-------
fourier_gaussian : Dask Array
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_gaussian(input_, sigma=4)
>>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
# Validate and normalize arguments
input, sigma, n, axis = _utils._norm_args(input, sigma, n=n, axis=axis)
# Compute frequencies
ang_freq_grid = _utils._get_ang_freq_grid(
input.shape,
chunks=input.chunks,
dtype=sigma.dtype
)
# Compute Fourier transformed Gaussian
gaussian = dask.array.exp(
- dask.array.tensordot(sigma ** 2, ang_freq_grid ** 2, axes=1) / 2
)
result = input * gaussian
return result
def fourier_shift(input, shift, n=-1, axis=-1):
"""
Multi-dimensional fourier shift filter.
The array is multiplied with the fourier transform of a shift operation.
Parameters
----------
input : array_like
The input array.
shift : float or sequence
        The shift to apply along each axis.
If a float, `shift` is the same for all axes. If a sequence, `shift`
has to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
Returns
-------
fourier_shift : Dask Array
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> import numpy.fft
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_shift(input_, shift=200)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
if issubclass(input.dtype.type, numbers.Real):
input = input.astype(complex)
# Validate and normalize arguments
input, shift, n, axis = _utils._norm_args(input, shift, n=n, axis=axis)
# Constants with type converted
J = input.dtype.type(1j)
# Get the grid of frequencies
ang_freq_grid = _utils._get_ang_freq_grid(
input.shape,
chunks=input.chunks,
dtype=shift.dtype
)
# Apply shift
phase_shift = dask.array.exp(
- J * dask.array.tensordot(shift, ang_freq_grid, axes=1)
)
result = input * phase_shift
return result
def fourier_uniform(input, size, n=-1, axis=-1):
"""
Multi-dimensional uniform fourier filter.
The array is multiplied with the fourier transform of a box of given
size.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
Returns
-------
fourier_uniform : Dask Array
        The filtered input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_uniform(input_, size=20)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
# Validate and normalize arguments
input, size, n, axis = _utils._norm_args(input, size, n=n, axis=axis)
# Get the grid of frequencies
freq_grid = _utils._get_freq_grid(
input.shape,
chunks=input.chunks,
dtype=size.dtype
)
# Compute uniform filter
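    # (the Fourier transform of a box of width `size` is, up to normalisation,
    # sinc(size * f) along each axis; the separable factors are multiplied
    # together below)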
uniform = _compat._sinc(
size[(slice(None),) + input.ndim * (None,)] * freq_grid
)
uniform = dask.array.prod(uniform, axis=0)
result = input * uniform
return result
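# A minimal usage sketch (a documentation aid, not executed on import; it
# assumes dask and this package are installed): filter a chunked image in
# frequency space with `fourier_gaussian`.
#
#     import dask.array as da
#     from dask.array.fft import fft2, ifft2
#     from dask_ndfourier import fourier_gaussian
#
#     img = da.random.random((512, 512), chunks=(128, 128))
#     freq = fft2(img)                                     # complex FFT
#     smooth = ifft2(fourier_gaussian(freq, sigma=4)).real
#     smooth.compute()                                     # run the graph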
|
bsd-3-clause
|
jameslao/QuantSoftwareToolkit
|
setup.py
|
1
|
1364
|
'''Author: Sourabh Bajaj'''
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
name='QSTK',
version='0.2.6',
author='Sourabh Bajaj',
packages=find_packages(),
namespace_packages=['QSTK'],
include_package_data=True,
long_description=open('README.md').read(),
author_email='[email protected]',
url='https://github.com/tucker777/QuantSoftwareToolkit',
license=open('LICENSE.txt').read(),
description='QuantSoftware Toolkit',
zip_safe=False,
install_requires=[
"numpy >= 1.6.1",
"scipy >= 0.9.0",
"matplotlib >= 1.1.0",
"pandas >= 0.7.3",
"python-dateutil >= 1.5",
"scikit-learn >= 0.11",
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Intended Audience :: Financial and Insurance Industry',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities',
],
)
|
bsd-3-clause
|
bavardage/statsmodels
|
statsmodels/sandbox/examples/example_crossval.py
|
3
|
2215
|
import numpy as np
from statsmodels.sandbox.tools import cross_val
if __name__ == '__main__':
#A: josef-pktd
import statsmodels.api as sm
from statsmodels.api import OLS
#from statsmodels.datasets.longley import load
from statsmodels.datasets.stackloss import load
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt,
default_latex_fmt, default_html_fmt)
import numpy as np
data = load()
data.exog = sm.tools.add_constant(data.exog, prepend=False)
resols = sm.OLS(data.endog, data.exog).fit()
    print('\n OLS leave 1 out')
for inidx, outidx in cross_val.LeaveOneOut(len(data.endog)):
res = sm.OLS(data.endog[inidx], data.exog[inidx,:]).fit()
        print(data.endog[outidx], res.model.predict(res.params, data.exog[outidx,:]), end=' ')
        print(data.endog[outidx] - res.model.predict(res.params, data.exog[outidx,:]))
    print('\n OLS leave 2 out')
resparams = []
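    # LeavePOut(n, 2) yields every train/test split that leaves two
    # observations out, so resparams collects one OLS parameter vector
    # per left-out pair.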
for inidx, outidx in cross_val.LeavePOut(len(data.endog), 2):
res = sm.OLS(data.endog[inidx], data.exog[inidx,:]).fit()
#print data.endog[outidx], res.model.predict(data.exog[outidx,:]),
#print ((data.endog[outidx] - res.model.predict(data.exog[outidx,:]))**2).sum()
resparams.append(res.params)
resparams = np.array(resparams)
    print(resparams)
doplots = 1
if doplots:
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
plt.figure()
figtitle = 'Leave2out parameter estimates'
t = plt.gcf().text(0.5,
0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
for i in range(resparams.shape[1]):
plt.subplot(4, 2, i+1)
plt.hist(resparams[:,i], bins = 10)
#plt.title("Leave2out parameter estimates")
plt.show()
for inidx, outidx in cross_val.KStepAhead(20,2):
        # note: the following were broken because KStepAhead now returns a slice by default
        print(inidx)
        print(np.ones(20)[inidx].sum(), np.arange(20)[inidx][-4:])
        print(outidx)
        print(np.nonzero(np.ones(20)[outidx])[0][()])
|
bsd-3-clause
|
rubikloud/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
276
|
3790
|
# Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
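    # String-valued features are expanded into boolean "feature=value"
    # columns (one-of-K coding), while numeric values keep a single column.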
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
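# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not an official example): it shows
# the behaviour the tests above rely on -- numeric features are kept as-is and
# string features are expanded into "name=value" indicator columns. The input
# dicts are arbitrary placeholders.
def _demo_dict_vectorizer():
    v = DictVectorizer(sparse=False)
    X = v.fit_transform([{"price": 3.5, "city": "Zurich"},
                         {"price": 1.0, "city": "Bern"}])
    # feature names: ['city=Bern', 'city=Zurich', 'price']
    return v.get_feature_names(), X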
|
bsd-3-clause
|
Andrew-McNab-UK/DIRAC
|
docs/Tools/fakeEnvironment.py
|
4
|
4827
|
''' fakeEnvironment
  this module allows creating the documentation without having to do
  any kind of special installation. The mocked modules include GSI,
  MySQLdb, sqlalchemy, suds, matplotlib and the other third-party
  packages patched into sys.modules below.
'''
import mock
import sys
#...............................................................................
# mocks...
class MyMock(mock.Mock):
def __len__(self):
return 0
# GSI
mockGSI = MyMock()
mockGSI.__version__ = "1"
mockGSI.version.__version__ = "1"
# MySQLdb
mockMySQLdb = mock.Mock()
#...............................................................................
# sys.modules hacked
sys.modules[ 'GSI' ] = mockGSI
sys.modules[ 'MySQLdb' ] = mockMySQLdb
sys.modules[ 'MySQLdb.cursors' ] = mock.Mock()
#FIXME: do we need all them ??
sys.modules[ 'sqlalchemy' ] = mock.Mock()
sys.modules[ 'sqlalchemy.orm' ] = mock.Mock()
sys.modules[ 'sqlalchemy.orm.exc' ] = mock.Mock()
sys.modules[ 'sqlalchemy.orm.query' ] = mock.Mock()
sys.modules[ 'sqlalchemy.engine' ] = mock.Mock()
sys.modules[ 'sqlalchemy.engine.reflection' ] = mock.Mock()
sys.modules[ 'sqlalchemy.ext' ] = mock.Mock()
sys.modules[ 'sqlalchemy.ext.declarative' ] = mock.Mock()
sys.modules[ 'sqlalchemy.schema' ] = mock.Mock()
sys.modules[ 'sqlalchemy.sql' ] = mock.Mock()
sys.modules[ 'sqlalchemy.sql.expression' ] = mock.Mock()
sys.modules[ 'lcg_util' ] = mock.Mock()
sys.modules[ 'suds' ] = mock.Mock()
sys.modules[ 'suds.client' ] = mock.Mock()
sys.modules[ 'suds.transport' ] = mock.Mock()
sys.modules[ 'irods' ] = mock.Mock()
sys.modules[ 'pylab' ] = mock.Mock()
sys.modules[ 'pytz' ] = mock.Mock()
sys.modules[ 'numpy' ] = mock.Mock()
sys.modules[ 'numpy.random' ] = mock.Mock()
sys.modules[ 'matplotlib' ] = mock.Mock()
sys.modules[ 'matplotlib.ticker' ] = mock.Mock()
sys.modules[ 'matplotlib.figure' ] = mock.Mock()
sys.modules[ 'matplotlib.patches' ] = mock.Mock()
sys.modules[ 'matplotlib.dates' ] = mock.Mock()
sys.modules[ 'matplotlib.text' ] = mock.Mock()
sys.modules[ 'matplotlib.axes' ] = mock.Mock()
sys.modules[ 'matplotlib.pylab' ] = mock.Mock()
sys.modules[ 'matplotlib.lines' ] = mock.Mock()
sys.modules[ 'matplotlib.cbook' ] = mock.Mock()
sys.modules[ 'matplotlib.colors' ] = mock.Mock()
sys.modules[ 'matplotlib.cm' ] = mock.Mock()
sys.modules[ 'matplotlib.colorbar' ] = mock.Mock()
sys.modules[ 'cx_Oracle' ] = mock.Mock()
sys.modules[ 'dateutil' ] = mock.Mock()
sys.modules[ 'dateutil.relativedelta' ] = mock.Mock()
sys.modules[ 'matplotlib.backends' ] = mock.Mock()
sys.modules[ 'matplotlib.backends.backend_agg' ] = mock.Mock()
sys.modules[ 'fts3' ] = mock.Mock()
sys.modules[ 'fts3.rest' ] = mock.Mock()
sys.modules[ 'fts3.rest.client' ] = mock.Mock()
sys.modules[ 'fts3.rest.client.easy' ] = mock.Mock()
sys.modules[ 'fts3.rest.client.exceptions' ] = mock.Mock()
sys.modules[ 'fts3.rest.client.request' ] = mock.Mock()
sys.modules[ 'pyparsing' ] = mock.MagicMock()
sys.modules[ 'stomp' ] = mock.MagicMock()
sys.modules[ 'psutil' ] = mock.MagicMock()
sys.modules[ '_arc' ] = mock.Mock()
sys.modules[ 'arc' ] = mock.Mock()
sys.modules[ 'arc.common' ] = mock.Mock()
sys.modules[ 'gfal2' ] = mock.Mock()
sys.modules[ 'XRootD' ] = mock.Mock()
sys.modules[ 'XRootD.client' ] = mock.Mock()
sys.modules[ 'XRootD.client.flags' ] = mock.Mock()
sys.modules[ 'elasticsearch' ] = mock.Mock()
sys.modules[ 'elasticsearch.Elasticsearch' ] = mock.Mock()
sys.modules[ 'elasticsearch_dsl' ] = mock.Mock()
sys.modules[ 'elasticsearch.exceptions' ] = mock.Mock()
sys.modules[ 'elasticsearch.helpers' ] = mock.Mock()
#PlotCache and PlottingHandler create a thread and prevent sphinx from exiting
#sys.modules[ 'DIRAC.FrameworkSystem.Service.PlotCache' ] = mock.MagicMock()
#sys.modules[ 'DIRAC.FrameworkSystem.Service.PlottingHandler' ] = mock.MagicMock()
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
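# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): a Sphinx conf.py would typically
# import this module before importing any DIRAC code, so that the mocks above
# are already installed in sys.modules. The path used here is a hypothetical
# placeholder, not the actual layout of the DIRAC documentation tree.
#
#   # somewhere near the top of conf.py
#   import os, sys
#   sys.path.insert(0, os.path.abspath('../Tools'))
#   import fakeEnvironment  # noqa: F401  -- installing the mocks is the point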
|
gpl-3.0
|
architecture-building-systems/CEAforArcGIS
|
cea/technologies/heatpumps.py
|
1
|
17744
|
"""
heatpumps
"""
from math import floor, log, ceil
import pandas as pd
from cea.optimization.constants import HP_DELTA_T_COND, HP_DELTA_T_EVAP, HP_ETA_EX, HP_ETA_EX_COOL, HP_AUXRATIO, \
GHP_AUXRATIO, HP_MAX_T_COND, GHP_ETA_EX, GHP_CMAX_SIZE_TH, HP_MAX_SIZE, HP_COP_MAX, HP_COP_MIN
from cea.constants import HEAT_CAPACITY_OF_WATER_JPERKGK
import numpy as np
from cea.analysis.costs.equations import calc_capex_annualized, calc_opex_annualized
__author__ = "Thuy-An Nguyen"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Thuy-An Nguyen", "Tim Vollrath", "Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
#============================
#operation costs
#============================
def HP_air_air(mdot_cp_WC, t_sup_K, t_re_K, tsource_K):
"""
For the operation of a heat pump (direct expansion unit) connected to minisplit units
:type mdot_cp_WC : float
:param mdot_cp_WC: capacity mass flow rate.
:type t_sup_K : float
:param t_sup_K: supply temperature to the minisplit unit (cold)
:type t_re_K : float
    :param t_re_K: return temperature from the minisplit unit (hot)
:type tsource_K : float
:param tsource_K: temperature of the source
    :rtype E_req_W : float
    :returns E_req_W: total electric power requirement for compressor and auxiliary electricity
..[C. Montagud et al., 2014] C. Montagud, J.M. Corberan, A. Montero (2014). In situ optimization methodology for
the water circulation pump frequency of ground source heat pump systems. Energy and Buildings
+ reverse cycle
"""
if mdot_cp_WC > 0.0:
# calculate condenser temperature
tcond_K = tsource_K
# calculate evaporator temperature
tevap_K = t_sup_K # approximate evaporator temperature with air-side supply temperature
# calculate COP
if np.isclose(tcond_K, tevap_K):
print('condenser temperature is equal to evaporator temperature, COP set to the maximum')
COP = HP_COP_MAX
else:
COP = HP_ETA_EX_COOL * tevap_K / (tcond_K - tevap_K)
# in order to work in the limits of the equation
if COP > HP_COP_MAX:
COP = HP_COP_MAX
elif COP < 1.0:
COP = HP_COP_MIN
qcolddot_W = mdot_cp_WC * (t_re_K - t_sup_K)
wdot_W = qcolddot_W / COP
E_req_W = wdot_W / HP_AUXRATIO # compressor power [C. Montagud et al., 2014]_
else:
E_req_W = 0.0
return E_req_W
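# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the capacity flow rate and
# temperatures below are arbitrary assumptions chosen to exercise HP_air_air,
# not values taken from a CEA scenario.
def _demo_HP_air_air():
    mdot_cp_WC = 500.0         # capacity flow rate [W/K]
    t_sup_K = 273.15 + 16.0    # supply temperature to the minisplit unit
    t_re_K = 273.15 + 24.0     # return temperature from the minisplit unit
    tsource_K = 273.15 + 30.0  # source temperature
    return HP_air_air(mdot_cp_WC, t_sup_K, t_re_K, tsource_K)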
def calc_Cop_GHP(ground_temp_K, mdot_kgpers, T_DH_sup_K, T_re_K):
"""
For the operation of a Geothermal heat pump (GSHP) supplying DHN.
:type mdot_kgpers : float
:param mdot_kgpers: supply mass flow rate to the DHN
:type T_DH_sup_K : float
:param T_DH_sup_K: supply temperature to the DHN (hot)
:type T_re_K : float
    :param T_re_K: return temperature from the DHN (cold)
:rtype wdot_el : float
:returns wdot_el: total electric power requirement for compressor and auxiliary el.
:rtype qcolddot : float
:returns qcolddot: cold power requirement
:rtype qhotdot_missing : float
:returns qhotdot_missing: deficit heating energy from GSHP
:rtype tsup2 :
:returns tsup2: supply temperature after HP (to DHN)
..[O. Ozgener et al., 2005] O. Ozgener, A. Hepbasli (2005). Experimental performance analysis of a solar assisted
ground-source heat pump greenhouse heating system, Energy Build.
..[C. Montagud et al., 2014] C. Montagud, J.M. Corberan, A. Montero (2014). In situ optimization methodology for
the water circulation pump frequency of ground source heat pump systems. Energy and Buildings
"""
tsup2_K = T_DH_sup_K # tsup2 = tsup, if all load can be provided by the HP
# calculate condenser temperature
tcond_K = T_DH_sup_K + HP_DELTA_T_COND
if tcond_K > HP_MAX_T_COND:
#raise ModelError
tcond_K = HP_MAX_T_COND
tsup2_K = tcond_K - HP_DELTA_T_COND # lower the supply temp if necessary, tsup2 < tsup if max load is not enough
# calculate evaporator temperature
tevap_K = ground_temp_K - HP_DELTA_T_EVAP
COP = GHP_ETA_EX / (1 - tevap_K / tcond_K) # [O. Ozgener et al., 2005]_
qhotdot_W = mdot_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK * (tsup2_K - T_re_K)
qhotdot_missing_W = mdot_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK * (T_DH_sup_K - tsup2_K) #calculate the missing energy if tsup2 < tsup
wdot_W = qhotdot_W / COP
wdot_el_W = wdot_W / GHP_AUXRATIO # compressor power [C. Montagud et al., 2014]_
qcolddot_W = qhotdot_W - wdot_W
return wdot_el_W, qcolddot_W, qhotdot_missing_W, tsup2_K
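# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): a single call of calc_Cop_GHP with
# assumed temperatures (about 10 degC ground, 60/40 degC network) and an
# assumed mass flow rate, to show the shape of the returned tuple.
def _demo_calc_Cop_GHP():
    wdot_el_W, qcolddot_W, qhotdot_missing_W, tsup2_K = calc_Cop_GHP(
        ground_temp_K=273.15 + 10.0,
        mdot_kgpers=10.0,
        T_DH_sup_K=273.15 + 60.0,
        T_re_K=273.15 + 40.0)
    return wdot_el_W, qcolddot_W, qhotdot_missing_W, tsup2_K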
def GHP_op_cost(mdot_kgpers, t_sup_K, t_re_K, t_sup_GHP_K, Q_therm_GHP_W):
"""
    Operation of a ground-source heat pump (GSHP) supplying the DHN
:type mdot_kgpers : float
:param mdot_kgpers: supply mass flow rate to the DHN
:type t_sup_K : float
:param t_sup_K: supply temperature to the DHN (hot)
:type t_re_K : float
    :param t_re_K: return temperature from the DHN (cold)
    :type t_sup_GHP_K : float
    :param t_sup_GHP_K: ground-loop supply temperature to the heat pump
    :type Q_therm_GHP_W : float
    :param Q_therm_GHP_W: maximum thermal power available from the GSHP
    :rtype E_GHP_req_W: float
    :returns E_GHP_req_W: electricity required for GSHP operation
    :rtype qcoldot_W: float
    :returns qcoldot_W: cold power requirement
    :rtype q_therm_W: float
    :returns q_therm_W: thermal energy supplied to DHN
..[L. Girardin et al., 2010] L. Girardin, F. Marechal, M. Dubuis, N. Calame-Darbellay, D. Favrat (2010). EnerGis:
a geographical information based system for the evaluation of integrated energy conversion systems in urban areas,
Energy.
"""
if (t_sup_K + HP_DELTA_T_COND) == t_sup_GHP_K:
COP = 1
else:
COP = HP_ETA_EX * (t_sup_K + HP_DELTA_T_COND) / ((t_sup_K + HP_DELTA_T_COND) - t_sup_GHP_K)
if t_sup_K == t_re_K:
q_therm_W = 0
qcoldot_W = 0
E_GHP_req_W = 0
else:
q_therm_W = mdot_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK * (t_sup_K - t_re_K)
if q_therm_W > Q_therm_GHP_W:
q_therm_W = Q_therm_GHP_W
qcoldot_W = q_therm_W * (1 - (1 / COP))
E_GHP_req_W = q_therm_W / COP
return E_GHP_req_W, qcoldot_W, q_therm_W
def GHP_Op_max(Q_max_GHP_W, tsup_K, tground_K):
"""
For the operation of a Geothermal heat pump (GSHP) at maximum capacity supplying DHN.
:type tsup_K : float
:param tsup_K: supply temperature to the DHN (hot)
:type tground_K : float
:param tground_K: ground temperature
    :type Q_max_GHP_W : float
    :param Q_max_GHP_W: maximum thermal power available from the GSHP
:rtype qhotdot: float
:returns qhotdot: heating energy provided from GHSP
:rtype COP: float
:returns COP: coefficient of performance of GSHP
"""
COP = HP_ETA_EX * (tsup_K + HP_DELTA_T_COND) / ((tsup_K + HP_DELTA_T_COND) - tground_K)
qhotdot_Wh = Q_max_GHP_W /( 1 - ( 1 / COP ) )
return qhotdot_Wh, COP
def HPLake_op_cost(Q_gen_W, tsup_K, tret_K, tlake):
"""
For the operation of lake heat pump supplying DHN
    :type Q_gen_W : float
    :param Q_gen_W: thermal energy to be supplied to the DHN
:type tsup_K : float
:param tsup_K: supply temperature to the DHN (hot)
:type tret_K : float
    :param tret_K: return temperature from the DHN (cold)
:type tlake : float
:param tlake: lake temperature
:rtype C_HPL_el: float
:returns C_HPL_el: electricity cost of Lake HP operation
:rtype wdot: float
    :returns wdot: electricity required for Lake HP operation
:rtype Q_cold_primary: float
:returns Q_cold_primary: cold power requirement
:rtype Q_therm: float
:returns Q_therm: thermal energy supplied to DHN
"""
mdot_kgpers = Q_gen_W / (HEAT_CAPACITY_OF_WATER_JPERKGK * (tsup_K - tret_K))
E_HPLake_req_W, qcolddot_W = HPLake_Op(mdot_kgpers, tsup_K, tret_K, tlake)
Q_therm_W = mdot_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK * (tsup_K - tret_K)
Q_cold_primary_W = qcolddot_W
return E_HPLake_req_W, Q_cold_primary_W, Q_therm_W
def HPLake_Op(mdot_kgpers, t_sup_K, t_re_K, t_lake_K):
"""
For the operation of a Heat pump between a district heating network and a lake
:type mdot_kgpers : float
:param mdot_kgpers: supply mass flow rate to the DHN
:type t_sup_K : float
:param t_sup_K: supply temperature to the DHN (hot)
:type t_re_K : float
    :param t_re_K: return temperature from the DHN (cold)
:type t_lake_K : float
:param t_lake_K: lake temperature
:rtype wdot_el : float
:returns wdot_el: total electric power requirement for compressor and auxiliary el.
:rtype qcolddot : float
:returns qcolddot: cold power requirement
..[L. Girardin et al., 2010] L. Girardin, F. Marechal, M. Dubuis, N. Calame-Darbellay, D. Favrat (2010). EnerGis:
a geographical information based system for the evaluation of integrated energy conversion systems in urban areas,
Energy.
..[C. Montagud et al., 2014] C. Montagud, J.M. Corberan, A. Montero (2014). In situ optimization methodology for
the water circulation pump frequency of ground source heat pump systems. Energy and Buildings
"""
# calculate condenser temperature
tcond = t_sup_K + HP_DELTA_T_COND
if tcond > HP_MAX_T_COND:
tcond = HP_MAX_T_COND
# calculate evaporator temperature
tevap_K = t_lake_K - HP_DELTA_T_EVAP
COP = HP_ETA_EX / (1 - tevap_K / tcond) # [L. Girardin et al., 2010]_
q_hotdot_W = mdot_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK * (t_sup_K - t_re_K)
if q_hotdot_W > HP_MAX_SIZE:
print("Qhot above max size on the market !")
wdot_W = q_hotdot_W / COP
E_HPLake_req_W = wdot_W / HP_AUXRATIO # compressor power [C. Montagud et al., 2014]_
q_colddot_W = q_hotdot_W - wdot_W
return E_HPLake_req_W, q_colddot_W
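# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the lake heat pump model called
# through HPLake_op_cost. Heat demand and temperatures are assumed values.
def _demo_HPLake_op_cost():
    Q_gen_W = 2.0e6             # heat to be delivered to the DHN
    tsup_K = 273.15 + 70.0      # DHN supply temperature
    tret_K = 273.15 + 45.0      # DHN return temperature
    tlake_K = 273.15 + 6.0      # lake temperature
    return HPLake_op_cost(Q_gen_W, tsup_K, tret_K, tlake_K)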
def HPSew_op_cost(mdot_kgpers, t_sup_K, t_re_K, t_sup_sew_K, Q_therm_Sew_W):
"""
Operation cost of sewage water HP supplying DHN
:type mdot_kgpers : float
:param mdot_kgpers: supply mass flow rate to the DHN
:type t_sup_K : float
:param t_sup_K: supply temperature to the DHN (hot)
:type t_re_K : float
    :param t_re_K: return temperature from the DHN (cold)
:type t_sup_sew_K : float
:param t_sup_sew_K: sewage supply temperature
:rtype C_HPSew_el_pure: float
:returns C_HPSew_el_pure: electricity cost of sewage water HP operation
:rtype C_HPSew_per_kWh_th_pure: float
:returns C_HPSew_per_kWh_th_pure: electricity cost per kWh thermal energy produced from sewage water HP
:rtype qcoldot: float
:returns qcoldot: cold power requirement
:rtype q_therm: float
:returns q_therm: thermal energy supplied to DHN
:rtype wdot: float
    :returns wdot: electricity required for sewage water HP operation
..[L. Girardin et al., 2010] L. Girardin, F. Marechal, M. Dubuis, N. Calame-Darbellay, D. Favrat (2010). EnerGis:
a geographical information based system for the evaluation of integrated energy conversion systems in urban areas,
Energy.
"""
if (t_sup_K + HP_DELTA_T_COND) == t_sup_sew_K:
COP = 1
else:
COP = HP_ETA_EX * (t_sup_K + HP_DELTA_T_COND) / ((t_sup_K + HP_DELTA_T_COND) - t_sup_sew_K)
if t_sup_sew_K >= t_sup_K + HP_DELTA_T_COND:
q_therm_W = Q_therm_Sew_W
qcoldot_W = Q_therm_Sew_W
E_HPSew_req_W = 0.0
else:
q_therm_W = mdot_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK * (t_sup_K - t_re_K)
if q_therm_W > Q_therm_Sew_W:
q_therm_W = Q_therm_Sew_W
qcoldot_W = q_therm_W * (1 - (1 / COP))
E_HPSew_req_W = q_therm_W / COP
return qcoldot_W, q_therm_W, E_HPSew_req_W
def calc_Cinv_HP(HP_Size, locator, technology_type):
"""
Calculates the annualized investment costs for a water to water heat pump.
:type HP_Size : float
:param HP_Size: Design thermal size of the heat pump in [W]
:rtype InvCa : float
:returns InvCa: annualized investment costs in [CHF/a]
..[C. Weber, 2008] C.Weber, Multi-objective design and optimization of district energy systems including
polygeneration energy conversion technologies., PhD Thesis, EPFL
"""
Capex_a_HP_USD = 0.0
Opex_fixed_HP_USD = 0.0
Capex_HP_USD = 0.0
if HP_Size > 0.0:
HP_cost_data = pd.read_excel(locator.get_database_conversion_systems(), sheet_name="HP")
HP_cost_data = HP_cost_data[HP_cost_data['code'] == technology_type]
# if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least
# capacity for the corresponding technology from the database
if HP_Size < HP_cost_data.iloc[0]['cap_min']:
HP_Size = HP_cost_data.iloc[0]['cap_min']
max_HP_size = max(HP_cost_data['cap_max'].values)
if HP_Size <= max_HP_size:
HP_cost_data = HP_cost_data[
(HP_cost_data['cap_min'] <= HP_Size) & (HP_cost_data['cap_max'] > HP_Size)]
Inv_a = HP_cost_data.iloc[0]['a']
Inv_b = HP_cost_data.iloc[0]['b']
Inv_c = HP_cost_data.iloc[0]['c']
Inv_d = HP_cost_data.iloc[0]['d']
Inv_e = HP_cost_data.iloc[0]['e']
Inv_IR = HP_cost_data.iloc[0]['IR_%']
Inv_LT = HP_cost_data.iloc[0]['LT_yr']
Inv_OM = HP_cost_data.iloc[0]['O&M_%'] / 100
InvC = Inv_a + Inv_b * (HP_Size) ** Inv_c + (Inv_d + Inv_e * HP_Size) * log(HP_Size)
Capex_a_HP_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)
Opex_fixed_HP_USD = InvC * Inv_OM
Capex_HP_USD = InvC
else:
number_of_chillers = int(ceil(HP_Size / max_HP_size))
Q_nom_each_chiller = HP_Size / number_of_chillers
HP_cost_data = HP_cost_data[
(HP_cost_data['cap_min'] <= Q_nom_each_chiller) & (HP_cost_data['cap_max'] > Q_nom_each_chiller)]
for i in range(number_of_chillers):
Inv_a = HP_cost_data.iloc[0]['a']
Inv_b = HP_cost_data.iloc[0]['b']
Inv_c = HP_cost_data.iloc[0]['c']
Inv_d = HP_cost_data.iloc[0]['d']
Inv_e = HP_cost_data.iloc[0]['e']
Inv_IR = HP_cost_data.iloc[0]['IR_%']
Inv_LT = HP_cost_data.iloc[0]['LT_yr']
Inv_OM = HP_cost_data.iloc[0]['O&M_%'] / 100
InvC = Inv_a + Inv_b * (Q_nom_each_chiller) ** Inv_c + (Inv_d + Inv_e * Q_nom_each_chiller) * log(Q_nom_each_chiller)
Capex_a_HP_USD += calc_capex_annualized(InvC, Inv_IR, Inv_LT)
Opex_fixed_HP_USD += InvC * Inv_OM
Capex_HP_USD += InvC
else:
Capex_a_HP_USD = Opex_fixed_HP_USD = Capex_HP_USD = 0.0
return Capex_a_HP_USD, Opex_fixed_HP_USD, Capex_HP_USD
def calc_Cinv_GHP(GHP_Size_W, GHP_cost_data, BH_cost_data):
"""
Calculates the annualized investment costs for the geothermal heat pump
:type GHP_Size_W : float
:param GHP_Size_W: Design electrical size of the heat pump in [Wel]
InvCa : float
annualized investment costs in EUROS/a
"""
# if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least
# capacity for the corresponding technology from the database
if GHP_Size_W < GHP_cost_data['cap_min'][0]:
GHP_Size_W = GHP_cost_data['cap_min'][0]
GHP_cost_data = GHP_cost_data[
(GHP_cost_data['cap_min'] <= GHP_Size_W) & (GHP_cost_data['cap_max'] > GHP_Size_W)]
Inv_a = GHP_cost_data.iloc[0]['a']
Inv_b = GHP_cost_data.iloc[0]['b']
Inv_c = GHP_cost_data.iloc[0]['c']
Inv_d = GHP_cost_data.iloc[0]['d']
Inv_e = GHP_cost_data.iloc[0]['e']
Inv_IR = GHP_cost_data.iloc[0]['IR_%']
Inv_LT = GHP_cost_data.iloc[0]['LT_yr']
Inv_OM = GHP_cost_data.iloc[0]['O&M_%'] / 100
InvC_GHP = Inv_a + Inv_b * (GHP_Size_W) ** Inv_c + (Inv_d + Inv_e * GHP_Size_W) * log(GHP_Size_W)
Capex_a_GHP_USD = calc_capex_annualized(InvC_GHP, Inv_IR, Inv_LT)
Opex_fixed_GHP_USD = InvC_GHP * Inv_OM
# if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least
# capacity for the corresponding technology from the database
if GHP_Size_W < BH_cost_data['cap_min'][0]:
GHP_Size_W = BH_cost_data['cap_min'][0]
BH_cost_data = BH_cost_data[
(BH_cost_data['cap_min'] <= GHP_Size_W) & (BH_cost_data['cap_max'] > GHP_Size_W)]
Inv_a = BH_cost_data.iloc[0]['a']
Inv_b = BH_cost_data.iloc[0]['b']
Inv_c = BH_cost_data.iloc[0]['c']
Inv_d = BH_cost_data.iloc[0]['d']
Inv_e = BH_cost_data.iloc[0]['e']
Inv_IR = BH_cost_data.iloc[0]['IR_%']
Inv_LT = BH_cost_data.iloc[0]['LT_yr']
Inv_OM = BH_cost_data.iloc[0]['O&M_%'] / 100
InvC_BH = Inv_a + Inv_b * (GHP_Size_W) ** Inv_c + (Inv_d + Inv_e * GHP_Size_W) * log(GHP_Size_W)
Capex_a_BH_USD = calc_capex_annualized(InvC_BH, Inv_IR, Inv_LT)
Opex_fixed_BH_USD = InvC_BH * Inv_OM
Capex_a_GHP_total_USD = Capex_a_BH_USD + Capex_a_GHP_USD
Opex_fixed_GHP_total_USD = Opex_fixed_BH_USD + Opex_fixed_GHP_USD
Capex_GHP_total_USD = InvC_BH + InvC_GHP
return Capex_a_GHP_total_USD, Opex_fixed_GHP_total_USD, Capex_GHP_total_USD
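# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): calc_Cinv_GHP expects cost tables
# with the columns read above ('cap_min', 'cap_max', 'a'..'e', 'IR_%',
# 'LT_yr', 'O&M_%'). The coefficients and the 50 kWel size are arbitrary
# placeholders, not values from the CEA conversion-systems database.
def _demo_calc_Cinv_GHP():
    cost_table = pd.DataFrame({'cap_min': [0.0], 'cap_max': [1.0e6],
                               'a': [0.0], 'b': [5000.0], 'c': [0.7],
                               'd': [0.0], 'e': [0.0],
                               'IR_%': [5.0], 'LT_yr': [20.0], 'O&M_%': [2.0]})
    return calc_Cinv_GHP(50.0e3, cost_table, cost_table.copy())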
|
mit
|
kevin-intel/scikit-learn
|
examples/decomposition/plot_pca_vs_fa_model_selection.py
|
59
|
4523
|
"""
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
# #############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
# #############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
            label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
|
bsd-3-clause
|
heli522/scikit-learn
|
sklearn/covariance/tests/test_covariance.py
|
142
|
11068
|
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
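# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the shrinkage estimators exercised
# above, fit on the diabetes data loaded at module level. As the tests assert,
# ShrunkCovariance reusing the Ledoit-Wolf shrinkage reproduces its estimate.
def _demo_shrinkage_estimators():
    lw = LedoitWolf().fit(X)
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_).fit(X)
    assert_array_almost_equal(lw.covariance_, scov.covariance_, 4)
    return lw.shrinkage_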
|
bsd-3-clause
|
eawag-rdm/kobo-post
|
kobopost/mark_skipped.py
|
1
|
16491
|
# _*_ coding: utf-8 _*_
"""Usage:
mark_skipped [options] <questionaire> <form_definition> <outpath>
mark_skipped (-h | --help)
Processes <questionaire>, an xlsx-file, based on <form_definition>, an xls file,
and writes the output to an appropriately named file in <outpath>
Options:
-h --help This help.
--na=<na_marker> The string empty cells are replaced with [default: NA].
--format=<output_format> Recognized formats are "XLSX" and "CSV" [default: CSV].
--fullquestions Write a second header row that contains the full questions ("labels").
--keepnotes Do not delete columns that represent "notes" instead of questions.
"""
from docopt import docopt
import pandas as pd
import numpy as np
import re
import os
import sys
import logging
from .parser import XLSFormParser
from .parser import XLSFormLexer
logger = logging.getLogger(__name__)
class FormDef(object):
"""Represents a KoBoToolbox - form definition"""
def __init__(self, formname):
self.form = pd.read_excel(formname, na_values=None,
keep_default_na=[]).applymap(lambda x: str(x))
self.P = XLSFormParser()
self.L = XLSFormLexer()
self.prefixpat = r'(group_\S+\[\d+\]/)'
def get_form(self):
"""returns the current formdef dataframe"""
return self.form
def _check_colnames(self, conds):
"check for empty strings in 'name' - column"
wrong = conds[conds.name == '']
if not wrong.empty:
logger.warning('bad columnname in "name":\n{}\n--> dropping'
.format(wrong))
conds.drop(wrong.index, inplace=True)
return conds
def _elim_condition_loops(self):
"""Puts conditions that are set for loops inside the loop"""
idxstart = self.form.loc[self.form['type'] == 'begin repeat', :].index
idxend = self.form.loc[self.form['type'] == 'end repeat',:].index
# find condition loops
condloopsstart = []
for i, startidx in enumerate(idxstart):
if self.form.loc[startidx, 'relevant'] != '':
condloopsstart.append(i)
indexranges = [range(a, b) for a, b in zip(idxstart[condloopsstart],
idxend[condloopsstart])]
for ran in indexranges:
condition = self.form.loc[ran[0], 'relevant']
self.form.loc[ran[0], 'relevant'] = ''
for i in range(ran[0]+1, ran[-1]+1):
oldcond = self.form.loc[i, 'relevant']
joinlist = [c for c in [oldcond, condition] if c != '']
self.form.loc[i, 'relevant'] = ' and '.join(joinlist)
def mk_loopgrouping(self, survey):
"""Modifies the form description to explicitly define
columns defines in loops over groups and the respective conditions.
"""
def get_replacement_rules(groupname, oldrule, colnam, prefixes):
"""Create a sub-dataframe that contains the group specific
replacement definition/conditions.
"""
repdf = pd.DataFrame()
# create new columnname / condition pairs
for p in prefixes:
newrule = oldrule.copy(deep=True)
newrule.set_value('name', p + colnam)
newcondition = newrule['relevant']
# find and replace prefixed columns in condition
for cn in loopdict[groupname]['colnames']:
newcondition = newcondition.replace(cn, p + cn)
repdf = repdf.append(newrule.set_value('relevant', newcondition))
return(repdf)
def collect_loop_info():
"""Gather info about columnames defined in repeat-loops"""
idxstart = self.form.loc[self.form['type'] == 'begin repeat',:].index
idxend = self.form.loc[self.form['type'] == 'end repeat',:].index
indexranges = [range(a+1, b) for a, b in zip(idxstart, idxend)]
groups = [self.form.loc[i, 'name'] for i in idxstart]
colnamegroups = [self.form.loc[a+1:b-1, 'name']
for a, b in zip(idxstart, idxend)]
assert(len(idxstart) == len(idxend) == len(groups))
loopdict = {groups[i]: {'indices': list(indexranges[i]),
'colnames': list(colnamegroups[i])}
for i in range(0, len(groups))}
indexdict = {idx: grp
for i, grp in enumerate(groups)
for idx in indexranges[i]}
return (loopdict, indexdict)
def get_rules_in_loops_by_idx(indexdict):
"find conditional rows in repeat-loops"
idx_relevant = self.form.loc[pd.notnull(self.form.relevant)].index
idx_relevant = [i for i in idx_relevant if i in indexdict]
return idx_relevant
def expand_form(idx, indexdict):
"expands one rule at idx in a loop"
groupname = indexdict[idx]
oldrule = self.form.loc[idx,:]
colnam = oldrule['name']
prefixes = []
for mat in [re.match(self.prefixpat + colnam, c)
for c in survey.get_columnnames()]:
if mat:
prefixes.append(mat.group(1))
repdf = get_replacement_rules(groupname, oldrule, colnam, prefixes)
# replace the old rule - line with new ones
self.form = self.form.drop(idx)
self.form =self.form.append(repdf)
self._elim_condition_loops()
loopdict, indexdict = collect_loop_info()
idx_relevant = get_rules_in_loops_by_idx(indexdict)
for idx in idx_relevant:
expand_form(idx, indexdict)
def read_skipconditions(self):
conds = self.form.loc[:,('name', 'relevant')]
conds = self._check_colnames(conds)
conds = conds[conds.relevant != '']
conds.relevant = conds.relevant.map(self.P.parse)
return conds
class Survey(object):
"""Represents a KoBoToolbox Questionaire.
We assume that it was downloaded using
"Download" -> "CSV" -> "Advanced Export", with
"DON'T split select multiple choice answers into separate columns" checked.
"""
def __init__(self, arguments):
self.arguments = arguments
self.surveyname = arguments['<questionaire>']
self.data, self.sheetnames = self._read_workbook()
self.quest = self.join_main_and_groups()
self.F = FormDef(arguments['<form_definition>'])
self.orig_formdef = self.F.get_form()
self._repl_na()
def _read_workbook(self):
"""reads all worksheets of an Excel file.
first sheet is interpreted as 'main', others as
partial results from groups.
"""
data = {}
with pd.ExcelFile(self.surveyname) as xls:
sheets = xls.sheet_names
data['main'] = pd.read_excel(
xls, sheets[0], na_values=[],
keep_default_na=False,
index_col='_index').applymap(lambda x: str(x))
for s in sheets[1:]:
data[s] = pd.read_excel(
xls, s, na_values=[],
keep_default_na=False).applymap(lambda x: str(x))
data[s]['_parent_index'] = pd.to_numeric(data[s]['_parent_index'])
return([data, sheets])
def _massage_group_tables(self):
"""parses group-tables into tables suited for joining
with main table.
"""
data = {key: self.data[key] for key in self.sheetnames[1:]}
grouptables = {}
for d in data:
# get maximum number of group elements
par_idxs = [int(i) for i in data[d]._parent_index.tolist()]
nmax = max([par_idxs.count(x) for x in set(par_idxs)])
newindices = list(set(par_idxs))
# add groupcolumns
groupcols = [c for c in data[d].columns if c[0] != '_']
gcpairs = [c.split('/') for c in groupcols]
newcols = [d + '[' + str(i) + ']' + '/' + gc[1]
for gc in gcpairs for i in range(1, nmax + 1)]
# new DataFrame
newdf = pd.DataFrame(index=newindices, columns=newcols)
# populate new df
idxcount = {}
for i, row in newdf.iterrows():
newdict = {}
gcount = 0
partrows = data[d].loc[data[d]._parent_index == i, groupcols]
for _, oldrow in partrows.iterrows():
ocpairs = [c.split('/') for c in oldrow[oldrow != ''].index]
gcount += 1
newdict.update({c[0]+'['+str(gcount)+']/'+c[1]:
oldrow[c[0]+'/'+c[1]] for c in ocpairs})
newdf.loc[i] = pd.Series(newdict)
newdf = newdf.fillna('')
grouptables[d] = newdf
return(grouptables)
def join_main_and_groups(self):
grouptables = self._massage_group_tables()
for gname, gtable in grouptables.items():
self.data['main'] = pd.merge(
self.data['main'],
gtable,
left_index=True,
right_index=True,
how='outer')
return(self.data['main'])
def _repl_na(self):
"replace n/a with empty string"
self.quest.replace(to_replace=['n/a', np.nan],
value=['', ''], inplace=True)
def _get_column(self, colname):
"returns series of lists for cells in colname"
column = self.quest.loc[:, colname]
return column
def _check_selected(self, column, val):
"check whether val in values"
return column.map(lambda x: val in x.split())#.reset_index(drop=True)
def get_columnnames(self):
return self.quest.columns
def eval_skiprules(self):
'''Returns a DataFrame with relevant columns
that contain boolean indicator whether cell
was skipped or not.
'''
# fix loops in FormDef object
self.F.mk_loopgrouping(self)
# get skiprules
self.skiprules = self.F.read_skipconditions()
# define functionnames for evaluation of skiprules
get_column = self._get_column
check_selected = self._check_selected
logical_or = np.logical_or
logical_and = np.logical_and
logical_not = np.logical_not
skip = pd.DataFrame()
for i, row in self.skiprules.iterrows():
skip[row['name']] = eval(row['relevant']).apply(lambda x: not x)
return(skip)
def _insert_question_row(self, form):
"""Inserts a second header row that contains the
full questions ("labels")
"""
mapdict = {i[1]['name']: i[1]['label']
for i in self.F.form.loc[:,['name','label']].iterrows()}
newrow = list(form.columns)
newrow = [mapdict.get(x, '') for x in newrow]
form.loc[0] = newrow
form.sort_index(inplace=True)
return form
def _handle_notes(self, form):
newform = form.copy(deep=True)
# Column names that are "notes"
notenames = self.F.form.loc[self.F.form['type'] == 'note', 'name']
# check whether empty
assert((self.quest.loc[:, notenames].values == '').all())
if self.arguments['--keepnotes']:
newform.loc[:, notenames] = '_NOTE_'
else:
newform.drop(notenames, axis=1, inplace=True)
return newform
def _mk_final_table(self):
"""Creates final table"""
na_marker = self.arguments['--na']
skip = self.eval_skiprules()
# same axis-0 count?
assert(skip.shape[0] == self.quest.shape[0])
# same index?
assert(all(skip.index == self.quest.index))
# all columns in skip also in original quest?
missingcols = list(set(skip.columns) - set(self.quest.columns))
if len(missingcols) > 0:
            logger.warning('Columns in form definition '
'that do not appear in survey:\n{}'
.format(missingcols))
# indices are the same?
assert(all(skip.index == self.quest.index))
# apply the skipped - marker
newform = self.quest.copy(deep = True)
for colname in skip.columns:
repcol = newform[colname].where(np.logical_not(skip[colname]),
other='_SKIPPED_')
newform[colname] = newform[colname].where(
np.logical_not(skip[colname]), other='_SKIPPED_')
# check skipped values where 'n/a' or ''
isempty = self.quest == ''
isskipped = newform == '_SKIPPED_'
assert(all(isskipped == isempty))
# Handle columns representing "notes"
newform = self._handle_notes(newform)
# convert '' to 'NA'
newform.replace(to_replace='', value=na_marker, inplace=True)
newform.fillna(value=na_marker, inplace=True)
return newform
def _re_sort_columns(self, form):
"""Puts "group"-columns into the position they appear in the form
definition.
"""
newform = form.copy(deep=True)
formdefnames = self.orig_formdef.loc[:, 'name'] #names in form definition
survcols = form.columns #columns names in survey
grouppat = r'(?P<is_group>(?P<group_id>group_\w+)\[\d+\]/)?(?P<question>[\w/]+)'
def fullmatchdict(mo):
md = mo.groupdict()
md.update({'match': mo.group(0)})
return md
survcolgroupmatch = [[pos, fullmatchdict(re.match(grouppat, c))]
for pos, c in enumerate(survcols)]
def _move_group(survmatch, pos):
# create hole in survcol - positions
grouplen = len(survmatch)
for s in survcolgroupmatch:
s[0] = s[0] + grouplen if s[0] > pos else s[0]
# move group questions
survmatch.sort(key=lambda x: x[0])
for gcol in survmatch:
index = survcolgroupmatch.index(gcol)
survcolgroupmatch[index][0] = pos+1
pos += 1
return pos
pos = 0
for nam in formdefnames:
survmatch = [s for s in survcolgroupmatch if s[1]['question'] == nam]
if len(survmatch) == 0:
continue
elif (not survmatch[0][1]['is_group']) and len(survmatch) == 1:
pos = survmatch[0][0]
elif survmatch[0][1]['is_group']:
pos = _move_group(survmatch, pos)
else:
raise Exception('ERROR: Found multiple instances of {}'.format(nam))
# new list of columnnames
survcolgroupmatch.sort(key=lambda x: x[0])
newcols = [x[1]['match'] for x in survcolgroupmatch]
assert(set(list(survcols)) == set(newcols))
# re-ordered form
newform = newform[newcols]
return newform
def write_new_questionaire(self):
base = os.path.splitext(self.arguments['<questionaire>'])[0]
basename = os.path.basename(base)
extension = self.arguments['--format'].lower()
outpath = os.path.join(self.arguments['<outpath>'], basename + '.' + extension)
newform = self._mk_final_table()
if self.arguments['--fullquestions']:
newform = self._insert_question_row(newform)
newform = self._re_sort_columns(newform)
ext = os.path.splitext(outpath)[1]
if ext == '.csv':
with open(outpath, 'w') as f:
newform.to_csv(f, index=True, index_label='INDEX',
line_terminator='\r\n')
elif ext == '.xlsx':
newform.to_excel(outpath, index=True, sheet_name="Sheet1",
index_label='INDEX')
else:
raise NotImplementedError('Output format "{}" not recognized.'
.format(ext[1:].upper()))
def main():
arguments = docopt(__doc__, help=True)
surv = Survey(arguments)
surv.write_new_questionaire()
if __name__ == '__main__':
main()
|
agpl-3.0
|
edwardsmith999/pyDataView
|
misclib/misc.py
|
1
|
6185
|
#! /usr/bin/env python
# coding:utf-8
# Routines which are not specific to MD/CFD or CPL code
import os
import numpy as np
from matplotlib.colors import colorConverter
import matplotlib.pyplot as plt  # needed by update_line() below; was missing
from . import latex2utf
from math import log10, floor
import math as maths
import re
def tryint(s):
try:
return int(s)
except:
return s
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [ tryint(c) for c in re.split('([0-9]+)', s) ]
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
l.sort(key=alphanum_key)
class Chdir:
"""
Wrapper to move from current directory to new directory
and return when using with
Example usage:
with Chdir('./../'):
os.system('./a.out')
"""
def __init__( self, newPath ):
self.savedPath = os.getcwd()
self.newPath = newPath
def __enter__( self ):
os.chdir(self.newPath)
def __exit__( self, etype, value, traceback):
os.chdir( self.savedPath )
#Some simple functions to generate colours.
def pastel(colour, weight=2.4):
""" Convert colour into a nice pastel shade"""
rgb = np.asarray(colorConverter.to_rgb(colour))
# scale colour
maxc = max(rgb)
if maxc < 1.0 and maxc > 0:
# scale colour
scale = 1.0 / maxc
rgb = rgb * scale
# now decrease saturation
total = sum(rgb)
slack = 0
for x in rgb:
slack += 1.0 - x
# want to increase weight from total to weight
# pick x s.t. slack * x == weight - total
# x = (weight - total) / slack
x = (weight - total) / slack
rgb = [c + (x * (1.0-c)) for c in rgb]
return rgb
#Helper functions for matplotlib figures and axes to changed to dashed types
def setAxLinesBW(ax):
"""
Take each Line2D in the axes, ax, and convert the line style to be
suitable for black and white viewing.
"""
MARKERSIZE = 3
COLORMAP = {
'b': {'marker': None, 'dash': (None,None)},
'g': {'marker': None, 'dash': [5,5]},
'r': {'marker': None, 'dash': [5,3,1,3]},
'c': {'marker': None, 'dash': [1,3]},
'm': {'marker': None, 'dash': [5,2,5,2,5,10]},
'y': {'marker': None, 'dash': [5,3,1,2,1,10]},
'k': {'marker': 'o', 'dash': (None,None)} #[1,2,1,10]}
}
for line in ax.get_lines() + ax.get_legend().get_lines():
origColor = line.get_color()
line.set_color('black')
line.set_dashes(COLORMAP[origColor]['dash'])
line.set_marker(COLORMAP[origColor]['marker'])
line.set_markersize(MARKERSIZE)
def setFigLinesBW(fig):
"""
Take each axes in the figure, and for each line in the axes, make the
line viewable in black and white.
"""
for ax in fig.get_axes():
setAxLinesBW(ax)
def update_line(hl, new_data):
hl.set_xdata(np.append(hl.get_xdata(), new_data))
hl.set_ydata(np.append(hl.get_ydata(), new_data))
plt.draw()
def get_colours(n):
""" Return n pastel colours. """
base = np.asarray([[1,0,0], [0,1,0], [0,0,1]])
if n <= 3:
return base[0:n]
# how many new colours to we need to insert between
# red and green and between green and blue?
    needed = (((n - 3) + 1) // 2, (n - 3) // 2)
colours = []
for start in (0, 1):
for x in np.linspace(0, 1, needed[start]+2):
colours.append((base[start] * (1.0 - x)) +
(base[start+1] * x))
return [pastel(c) for c in colours[0:n]]
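# Minimal usage sketch (illustrative only): request a handful of pastel
# colours for plotting; the count of 5 is arbitrary.
def _demo_colours():
    five_colours = get_colours(5)   # five RGB triplets in pastel shades
    pastel_blue = pastel('b')       # pastel version of matplotlib blue
    return five_colours, pastel_blue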
def round_to_n(x,p):
"""
returns a string representation of x formatted with a precision of p
Based on the webkit javascript implementation taken from here:
https://code.google.com/p/webkit-mirror/source/browse/JavaScriptCore/kjs/number_object.cpp
"""
#No need to round an integer
if isinstance(x,int):
return x
else:
x = float(x)
if x == 0.:
return "0." + "0"*(p-1)
out = []
if x < 0:
out.append("-")
x = -x
e = int(maths.log10(x))
tens = maths.pow(10, e - p + 1)
n = maths.floor(x/tens)
if n < maths.pow(10, p - 1):
e = e -1
tens = maths.pow(10, e - p+1)
n = maths.floor(x / tens)
if abs((n + 1.) * tens - x) <= abs(n * tens -x):
n = n + 1
if n >= maths.pow(10,p):
n = n / 10.
e = e + 1
m = "%.*g" % (p, n)
if e < -2 or e >= p:
out.append(m[0])
if p > 1:
out.append(".")
out.extend(m[1:p])
out.append('e')
if e > 0:
out.append("+")
out.append(str(e))
elif e == (p -1):
out.append(m)
elif e >= 0:
out.append(m[:e+1])
if e+1 < len(m):
out.append(".")
out.extend(m[e+1:])
else:
out.append("0.")
out.extend(["0"]*-(e+1))
out.append(m)
return "".join(out)
def latextounicode(strings):
if type(strings) is str:
string = strings.encode('utf8')
try:
strings = strings.replace('rho','ρ')
except UnicodeDecodeError:
pass
except SyntaxError:
pass
if type(strings) is str:
try:
strings = strings.replace('rho','ρ')
except UnicodeDecodeError:
pass
except SyntaxError:
pass
elif type(strings) is list:
for i, string in enumerate(strings):
try:
strings[i] = string.replace('rho','ρ')
except UnicodeDecodeError:
pass
except SyntaxError:
pass
#latex2utf.latex2utf(string)
return strings
def unicodetolatex(strings):
if type(strings) is str:
#string = strings.encode('utf-8')
strings = strings.replace('ρ','rho')
if type(strings) is str:
strings = strings.replace('ρ','rho')
elif type(strings) is list:
for i, string in enumerate(strings):
#string = string.encode('utf-8')
strings[i] = string.replace('ρ','rho')
return strings
|
gpl-3.0
|
shyamalschandra/scikit-learn
|
sklearn/neighbors/tests/test_kd_tree.py
|
159
|
7852
|
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
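# Hedged usage sketch (not part of the original test module): a quick
# illustration of the public ``sklearn.neighbors.KDTree`` calls exercised by
# the tests above.  The sample sizes and parameters are made up, and the
# helper is never invoked by the test suite.
def _kdtree_usage_sketch():
    from sklearn.neighbors import KDTree
    rng = np.random.RandomState(0)
    X = rng.random_sample((100, 3))
    tree = KDTree(X, leaf_size=10)
    # k-nearest-neighbour query: distances and indices of the 3 closest points
    dist, ind = tree.query(X[:1], k=3)
    # radius query: indices (and optionally distances) of points within r
    ind_r, dist_r = tree.query_radius(X[:1], r=0.3, return_distance=True)
    # kernel density estimate at the query points
    dens = tree.kernel_density(X[:1], h=0.1, kernel='gaussian')
    # two-point correlation counts over a grid of radii
    counts = tree.two_point_correlation(X[:1], r=np.linspace(0, 1, 5))
    return dist, ind, ind_r, dist_r, dens, counts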
|
bsd-3-clause
|
chrsrds/scikit-learn
|
examples/linear_model/plot_iris_logistic.py
|
12
|
1761
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the first two dimensions (sepal length and width) of the `iris
<https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The datapoints
are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
logreg = LogisticRegression(C=1e5)
# Create an instance of Logistic Regression Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = .02 # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
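# Hedged follow-up sketch (not part of the original example): querying the
# classifier fitted above for a single, made-up measurement.
sample = np.array([[5.0, 3.5]])  # [sepal length, sepal width] in cm
print("predicted class:", logreg.predict(sample)[0])
print("class probabilities:", logreg.predict_proba(sample)[0])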
|
bsd-3-clause
|
mrcslws/nupic.research
|
nupic/research/frameworks/vernon/ray_custom_loggers.py
|
6
|
8764
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import codecs
import csv
import logging
import os
import pickle
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from ray.tune.logger import CSVLogger, JsonLogger, Logger
from ray.tune.result import TIME_TOTAL_S, TRAINING_ITERATION
from ray.tune.utils import flatten_dict
logger = logging.getLogger(__name__)
# Note:
# This file is introduced as a potential replacement for `ray_custom_loggers.py`.
# Therein, the code is written for tensorflow>=1.13.1; however, we've moved to using
# tensorflow 2.0 and up.
#
def record_tf_values(result, path, step, num_hist_bins=None):
"""
    TensorBoard will display these values under its scalars, images,
    histograms, and distributions tabs.
We manually generate the summary objects from raw data passed from tune
via the logger.
Currently supports:
* Scalar (any prefix, already supported by TFLogger)
* Seaborn plot ("seaborn_" prefix; see below for configuration details)
* Image ("img_" prefix)
* Histograms ("hist_" prefix)
```
writer = tf.summary.create_file_writer("/some/path")
result = {
"my_scalor": 4,
"hist_some_array": [1, 2, 3, 4],
"seaborn_dict": dict(
plot_type="lineplot",
x=[1, 2, 3],
y=[1, 2, 3],
),
"img_some_array": np.random.rand(3, 3, 3, 3)
}
with writer.as_default():
record_tf_values(result=result, path=["ray", "tune"], step=1)
    writer.flush()
"""
for attr, value in result.items():
if value is not None:
if attr.startswith("seaborn_"):
# Value should be a dict which defines the plot to make. For example:
#
# value = {
#
# # Plot setup.
# plot_type: string, # name of seaborn plotting function
# config: {...}, # (optional) passed to seaborn.set
# edit_axes_func: <callable> # (optional) edits axes e.g. set xlim
#
# # Params - to be passed to seaborn plotting method.
# data: pandas.DataFrame(...),
# x: <string>, # label of desired column of data
# y: <string>, # label of desired column of data , same size as x
# hue: <None or array like> # same size as x and y
#
# }
#
if not isinstance(value, dict):
continue
# Get seaborn plot type and config.
config = value.pop("config", {})
plot_type = value.pop("plot_type", None)
edit_axes_func = value.pop("edit_axes_func", lambda x: x)
if plot_type is None or not hasattr(sns, plot_type):
continue
# Plot seaborn plot.
plot_type = getattr(sns, plot_type)
plt.clf()
sns.set(**config)
ax = plot_type(**value)
edit_axes_func(ax)
                # Convert the figure to an image that TensorBoard can display.
# Save the plot to a PNG in memory.
stream = BytesIO()
plt.savefig(stream, format="png")
stream.seek(0)
# Convert PNG buffer to TF image
image_tf = tf.image.decode_png(stream.getvalue(), channels=4)
# Add the batch dimension
image_tf = tf.expand_dims(image_tf, 0)
# Save array as an image.
name = "/".join(path + [attr])
tf.summary.image(name=name, data=image_tf, step=step)
if attr.startswith("img_"):
# Convert to numpy array and save.
name = "/".join(path + [attr])
value_np = np.array(value)
tf.summary.image(name=name, data=value_np, step=step)
elif attr.startswith("hist_"):
# Convert to a numpy array
name = "/".join(path + [attr])
value_np = np.array(value)
tf.summary.histogram(
name=name, data=value_np, step=step, buckets=num_hist_bins)
else:
if type(value) in [int, float, np.float32, np.float64, np.int32]:
tf.summary.scalar(
name="/".join(path + [attr]), data=value, step=step
)
elif type(value) is dict:
record_tf_values(result=value, path=path + [attr], step=step)
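# ----------------------------------------------------------------------
# Hedged illustration (not part of the original module): a fuller
# ``seaborn_`` entry than the docstring example above, exercising the
# optional ``config`` and ``edit_axes_func`` hooks.  The helper is never
# called at import time; the log directory, plot type and data values are
# made up for the sketch.  ``record_tf_values`` relies on the module-level
# name ``tf`` (normally bound in ``TFLoggerPlus._init``), so the sketch
# binds it explicitly.
def _example_seaborn_entry(logdir="/tmp/tf_logger_plus_demo"):
    global tf
    import tensorflow
    tf = tensorflow
    writer = tf.summary.create_file_writer(logdir)
    result = {
        "seaborn_demo": dict(
            plot_type="lineplot",                         # any seaborn plotting function
            config=dict(style="whitegrid"),               # forwarded to seaborn.set
            edit_axes_func=lambda ax: ax.set_ylim(0, 4),  # post-plot axis tweak
            data=pd.DataFrame({"x": [1, 2, 3], "y": [3, 1, 2]}),
            x="x",
            y="y",
        ),
    }
    with writer.as_default():
        record_tf_values(result=result, path=["ray", "tune"], step=0)
    writer.flush()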
class TFLoggerPlus(Logger):
"""Tensorboard logger that supports histograms and images based on key
prefixes 'hist_' and 'img_'.
Pass instead of TFLogger e.g. tune.run(..., loggers=(JsonLogger,
CSVLogger, TFLoggerPlus))
"""
def _init(self):
try:
global tf
if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
logger.warning("Not importing TensorFlow for test purposes")
tf = None
else:
import tensorflow
tf = tensorflow
except ImportError:
logger.warning(
"Couldn't import TensorFlow - " "disabling TensorBoard logging."
)
self._file_writer = tf.summary.create_file_writer(self.logdir)
def on_result(self, result):
# Copy and remove extraneous results.
result = result.copy()
for k in ["config", "pid", "timestamp", TIME_TOTAL_S]:
if k in result:
del result[k] # not useful to tf log these
# Record results.
step = result[TRAINING_ITERATION]
with self._file_writer.as_default():
record_tf_values(
result=result,
path=["ray", "tune"],
step=step
)
self.flush()
def flush(self):
self._file_writer.flush()
def close(self):
self._file_writer.close()
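# ----------------------------------------------------------------------
# Hedged usage sketch (not from the original file): registering the logger
# with ray.tune, as the class docstring above suggests.  ``my_trainable`` is
# a hypothetical trainable, and the ``loggers=`` keyword follows the older
# ray.tune API that this module targets -- treat the exact signature as an
# assumption rather than a guarantee.
def _example_tune_run(my_trainable):
    from ray import tune
    return tune.run(
        my_trainable,
        config={"lr": 1e-3},
        loggers=(JsonLogger, CSVLogger, TFLoggerPlus),
    )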
class CSVLoggerPlus(CSVLogger):
# Define object types in which to save in pickled form.
pickle_types = (np.ndarray, pd.DataFrame)
def on_result(self, result):
tmp = result.copy()
if "config" in tmp:
del tmp["config"]
result = flatten_dict(tmp, delimiter="/")
if self._csv_out is None:
self._csv_out = csv.DictWriter(self._file, result.keys())
if not self._continuing:
self._csv_out.writeheader()
encode_results = {}
for k, v in result.items():
if k not in self._csv_out.fieldnames:
continue
if isinstance(v, self.pickle_types):
v = pickle.dumps(v)
v = codecs.encode(v, "base64").decode()
encode_results[k] = v
self._csv_out.writerow(encode_results)
self._file.flush()
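# ----------------------------------------------------------------------
# Hedged companion sketch (not part of the original module): recovering an
# object that ``CSVLoggerPlus.on_result`` wrote in pickled + base64 form.
# It simply reverses the encoding above; how the CSV cell is read back in
# (csv module, pandas, ...) and which column holds it are up to the caller.
def decode_pickled_cell(cell):
    """Decode a base64-encoded, pickled CSV cell back into a Python object."""
    return pickle.loads(codecs.decode(cell.encode(), "base64"))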
DEFAULT_LOGGERS = (JsonLogger, CSVLoggerPlus, TFLoggerPlus)
if __name__ == "__main__":
# ---------------------
# Test of TFLoggerPlus
# ---------------------
from tempfile import TemporaryDirectory
tempdir = TemporaryDirectory()
logger = TFLoggerPlus(config={}, logdir=tempdir.name)
result = {
"hello": 4,
"hist_": [1, 2, 3, 4],
"seaborn_test": dict(
plot_type="lineplot",
x=[1, 2, 3],
y=[1, 2, 3],
),
"img_test": np.random.rand(3, 3, 3, 3)
}
with logger._file_writer.as_default():
record_tf_values(result=result, path=["ray", "tune"], step=1)
print("Logged results successfully.")
input("Try \ntensorboard --logdir={}".format(tempdir.name))
|
agpl-3.0
|
ywcui1990/nupic.research
|
projects/sequence_prediction/continuous_sequence/run_lstm_scalarEncoder.py
|
10
|
10432
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
from optparse import OptionParser
from matplotlib import pyplot as plt
import numpy as np
from pybrain.datasets import SequentialDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import LSTMLayer
from pybrain.structure.modules import SigmoidLayer
from pybrain.supervised import RPropMinusTrainer
from swarm_runner import SwarmRunner
import pandas as pd
from htmresearch.support.sequence_learning_utils import *
from nupic.encoders.scalar import ScalarEncoder
from scipy import random
# set the random seed here to get reproducible lstm result
random.seed(6)
plt.ion()
def initializeLSTMnet(nDimInput, nDimOutput, nLSTMcells=10):
  # Build LSTM network with nDimInput input units, nLSTMcells hidden units (LSTM cells) and nDimOutput output units
net = buildNetwork(nDimInput, nLSTMcells, nDimOutput,
hiddenclass=LSTMLayer, bias=True, outclass=SigmoidLayer, recurrent=True)
return net
def readDataSet(dataSet):
filePath = 'data/'+dataSet+'.csv'
# df = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['time', 'data'])
# sequence = df['data']
if dataSet=='nyc_taxi':
df = pd.read_csv(filePath, header=0, skiprows=[1,2], names=['time', 'data', 'timeofday', 'dayofweek'])
sequence = df['data']
dayofweek = df['dayofweek']
timeofday = df['timeofday']
seq = pd.DataFrame(np.array(pd.concat([sequence, timeofday, dayofweek], axis=1)),
columns=['data', 'timeofday', 'dayofweek'])
elif dataSet=='sine':
df = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['time', 'data'])
sequence = df['data']
seq = pd.DataFrame(np.array(sequence), columns=['data'])
else:
    raise Exception('unrecognized dataset type')
return seq
def getSingleSample(i, sequence, useTimeOfDay, useDayOfWeek):
if encoderInput is None:
dataSDRInput = [sequence['normdata'][i]]
else:
dataSDRInput = encoderInput.encode(sequence['data'][i])
if useTimeOfDay and useDayOfWeek:
sample = np.concatenate((dataSDRInput, [sequence['timeofday'][i]], [sequence['dayofweek'][i]]), axis=0)
elif useTimeOfDay:
sample = np.concatenate((dataSDRInput, [sequence['timeofday'][i]]), axis=0)
elif useDayOfWeek:
sample = np.concatenate((dataSDRInput, [sequence['dayofweek'][i]]), axis=0)
else:
sample = dataSDRInput
return sample
def getPyBrainDataSetScalarEncoder(sequence, nTrain, encoderInput, encoderOutput,
predictionStep=1, useTimeOfDay=True, useDayOfWeek=True):
"""
Use scalar encoder for the data
:param sequence:
:param nTrain:
:param predictionStep:
:param useTimeOfDay:
:param useDayOfWeek:
:return:
"""
print "generate a pybrain dataset of sequences"
print "the training data contains ", str(nTrain-predictionStep), "records"
if encoderInput is None:
inDim = 1 + int(useTimeOfDay) + int(useDayOfWeek)
else:
inDim = encoderInput.n + int(useTimeOfDay) + int(useDayOfWeek)
if encoderOutput is None:
outDim = 1
else:
outDim = encoderOutput.n
ds = SequentialDataSet(inDim, outDim)
if useTimeOfDay:
print "include time of day as input field"
if useDayOfWeek:
print "include day of week as input field"
for i in xrange(nTrain-predictionStep):
sample = getSingleSample(i, sequence, useTimeOfDay, useDayOfWeek)
if encoderOutput is None:
dataSDROutput = [sequence['normdata'][i+predictionStep]]
else:
dataSDROutput = encoderOutput.encode(sequence['data'][i+predictionStep])
ds.addSample(sample, dataSDROutput)
return ds
def _getArgs():
parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
"\n\nCompare TM performance with trivial predictor using "
"model outputs in prediction directory "
"and outputting results to result directory.")
parser.add_option("-d",
"--dataSet",
type=str,
default='nyc_taxi',
dest="dataSet",
help="DataSet Name, choose from sine, SantaFe_A, MackeyGlass")
# parser.add_option("-n",
# "--predictionstep",
# type=int,
# default=5,
# dest="predictionstep",
# help="number of steps ahead to be predicted")
parser.add_option("-r",
"--repeatNumber",
type=int,
default=20,
dest="repeatNumber",
help="number of training epoches")
(options, remainder) = parser.parse_args()
print options
return options, remainder
if __name__ == "__main__":
(_options, _args) = _getArgs()
dataSet = _options.dataSet
rptNum = _options.repeatNumber
print "run LSTM on ", dataSet
SWARM_CONFIG = SwarmRunner.importSwarmDescription(dataSet)
predictedField = SWARM_CONFIG['inferenceArgs']['predictedField']
nTrain = SWARM_CONFIG["streamDef"]['streams'][0]['last_record']
predictionStep = SWARM_CONFIG['inferenceArgs']['predictionSteps'][0]
useTimeOfDay = True
useDayOfWeek = True
nTrain = 5000
# prepare dataset as pyBrain sequential dataset
sequence = readDataSet(dataSet)
# encoderInput = ScalarEncoder(w=1, minval=0, maxval=40000, n=15, forced=True)
# the number of buckets should be the same as classifier input encoder
encoderOutput = ScalarEncoder(w=1, minval=0, maxval=40000, n=22, forced=True)
# use the normalized raw data without encoding by setting the encoders to None
encoderInput = None
# encoderOutput = None
  # normalize the data
meanSeq = np.mean(sequence['data'])
stdSeq = np.std(sequence['data'])
sequence.loc[:,'normdata'] = pd.Series((sequence['data'] - meanSeq)/stdSeq, index=sequence.index)
meanTimeOfDay = np.mean(sequence['timeofday'])
stdTimeOfDay = np.std(sequence['timeofday'])
sequence['timeofday'] = (sequence['timeofday'] - meanTimeOfDay)/stdTimeOfDay
meanDayOfWeek = np.mean(sequence['dayofweek'])
stdDayOfWeek = np.std(sequence['dayofweek'])
sequence['dayofweek'] = (sequence['dayofweek'] - meanDayOfWeek)/stdDayOfWeek
ds = getPyBrainDataSetScalarEncoder(sequence, nTrain, encoderInput, encoderOutput,
predictionStep, useTimeOfDay, useDayOfWeek)
print "train LSTM with "+str(rptNum)+" repeats"
net = initializeLSTMnet(nDimInput=len(ds.getSample()[0]), nDimOutput=len(ds.getSample()[1]), nLSTMcells=20)
trainer = RPropMinusTrainer(net, dataset=ds, verbose=True)
error = []
for rpt in xrange(rptNum):
err = trainer.train()
error.append(err)
print "test LSTM"
net.reset()
targetInput = np.zeros((len(sequence),))
trueData = np.zeros((len(sequence),))
predictedInput = np.zeros((len(sequence),))
bucketValues = encoderOutput.getBucketValues()
if encoderOutput is not None:
predictedDistribution = np.zeros((len(sequence), encoderOutput.n))
targetDistribution = np.zeros((len(sequence), encoderOutput.n))
for i in xrange(len(sequence)-predictionStep):
sample = getSingleSample(i, sequence, useTimeOfDay, useDayOfWeek)
netActivation = net.activate(sample)
if encoderOutput is None:
predictedInput[i] = netActivation
else:
predictedInput[i] = bucketValues[np.where(netActivation == max(netActivation))[0][0]]
predictedDistribution[i, :] = netActivation/sum(netActivation)
targetDistribution[i, :] = encoderOutput.encode(sequence['data'][i+predictionStep])
trueData[i] = sequence['data'][i]
targetInput[i] = sequence['data'][i+predictionStep]
# print " target input: ", targetDistribution[i], " predicted Input: ", predictedInput[i]
if encoderOutput is None:
predictedInput = (predictedInput * stdSeq) + meanSeq
plt.close('all')
plt.figure(1)
plt.plot(targetInput[nTrain:], color='black')
plt.plot(predictedInput[nTrain:], color='red')
plt.title('LSTM, useTimeOfDay='+str(useTimeOfDay)+dataSet)
plt.xlim([0, 500])
plt.xlabel('Time')
plt.ylabel('Prediction')
else:
    # calculate negative log-likelihood: negLL[t] = -log( sum_k p_predicted[t, k] * p_target[t, k] )
Likelihood = np.multiply(predictedDistribution, targetDistribution)
Likelihood = np.sum(Likelihood, axis=1)
minProb = 0.00001
Likelihood[np.where(Likelihood < minProb)[0]] = minProb
negLL = -np.log(Likelihood)
negLLtest = np.mean(negLL[nTrain:])
print "LSTM, negLL Train %f Test %f" % (np.mean(negLL[:nTrain]), np.mean(negLL[nTrain:]))
plt.close('all')
fig = plt.figure(1)
NT = len(trueData)
plt.imshow(np.transpose(predictedDistribution), extent=(0, NT, encoderOutput.minval, encoderOutput.maxval),
interpolation='nearest', aspect='auto', origin='lower', cmap='Reds')
plt.plot(targetInput, color='black', label='GroundTruth')
plt.plot(predictedInput, color='blue', label='ML prediction')
plt.legend()
plt.title('LSTM, useTimeOfDay='+str(useTimeOfDay)+' '+dataSet+' test neg LL = '+str(negLLtest))
plt.xlim([NT-500, NT-predictionStep])
plt.xlabel('Time')
plt.ylabel('Prediction')
nrmse_train = NRMSE(targetInput[:nTrain], predictedInput[:nTrain])
nrmse_test = NRMSE(targetInput[nTrain:-predictionStep], predictedInput[nTrain:-predictionStep])
print "NRMSE, Train %f, Test %f" %(nrmse_train, nrmse_test)
# import plotly.plotly as py
# plot_url = py.plot_mpl(fig)
|
agpl-3.0
|